Vathsa's- Linux - SysOps and DevOps






AWS - Auto Scale magento- Ecommerce web servers
--------------------------------------------------------------


AZURE SAMPLE WORK-1
Cluster 1


AWS- Project 1




AWS  VPC-Private - Project 1


**********************************************
        Magento - my_first_cluster
**********************************************


*******************************************



Microsoft AZURE Commands - Day to Day
-------------------------------------------------------------------------------------------


#!/bin/bash
# Provision two load-balanced Azure VMs that host two websites (Contoso and
# Fabrikam) behind one Azure Load Balancer, with a front-end IP configuration
# and back-end address pool per site.
set -euo pipefail

RgName="MyResourceGroup"
Location="eastus"

# Create a resource group.
az group create --name "$RgName" --location "$Location"

# Create an availability set for the two VMs that host both websites.
az vm availability-set create \
  --resource-group "$RgName" \
  --location "$Location" \
  --name MyAvailabilitySet \
  --platform-fault-domain-count 2 \
  --platform-update-domain-count 2

# Create a virtual network and a subnet.
az network vnet create \
  --resource-group "$RgName" \
  --name MyVnet \
  --address-prefix 10.0.0.0/16 \
  --location "$Location" \
  --subnet-name MySubnet \
  --subnet-prefix 10.0.0.0/24

# Create three public IP addresses: one for the load balancer and two for the
# front-end IP configurations.
# FIX: the original split this command over two lines without a trailing
# backslash, so "--allocation-method Dynamic" ran as a separate, invalid command.
az network public-ip create \
  --resource-group "$RgName" \
  --name MyPublicIp-LoadBalancer \
  --allocation-method Dynamic

az network public-ip create --resource-group "$RgName" --name MyPublicIp-Contoso \
  --allocation-method Dynamic

az network public-ip create --resource-group "$RgName" --name MyPublicIp-Fabrikam \
  --allocation-method Dynamic

# Create a load balancer.
# FIX: same missing line continuation as above.
az network lb create \
  --resource-group "$RgName" \
  --location "$Location" \
  --name MyLoadBalancer \
  --frontend-ip-name FrontEnd \
  --backend-pool-name BackEnd \
  --public-ip-address MyPublicIp-LoadBalancer

# Create two front-end IP configurations, one per web site.
az network lb frontend-ip create --resource-group "$RgName" --lb-name MyLoadBalancer \
  --public-ip-address MyPublicIp-Contoso --name FeContoso

az network lb frontend-ip create --resource-group "$RgName" --lb-name MyLoadBalancer \
  --public-ip-address MyPublicIp-Fabrikam --name FeFabrikam

# Create the back-end address pools.
az network lb address-pool create --resource-group "$RgName" --lb-name MyLoadBalancer --name BeContoso

az network lb address-pool create --resource-group "$RgName" --lb-name MyLoadBalancer --name BeFabrikam

# Create an HTTP health probe on port 80.
# FIX: missing line continuation, and "--path" had no value; probe the site root.
az network lb probe create \
  --resource-group "$RgName" \
  --lb-name MyLoadBalancer \
  --name MyProbe \
  --protocol Http \
  --port 80 \
  --path /

# Create the load-balancing rules (front-end port 5000 -> back-end port 5000).
az network lb rule create --resource-group "$RgName" --lb-name MyLoadBalancer \
  --name LBRuleContoso --protocol Tcp --probe-name MyProbe \
  --frontend-port 5000 --backend-port 5000 \
  --frontend-ip-name FeContoso --backend-pool-name BeContoso

az network lb rule create --resource-group "$RgName" --lb-name MyLoadBalancer \
  --name LBRuleFabrikam --protocol Tcp --probe-name MyProbe \
  --frontend-port 5000 --backend-port 5000 \
  --frontend-ip-name FeFabrikam --backend-pool-name BeFabrikam

############### VM1 ###############

# Create a public IP for the first VM.
az network public-ip create --resource-group "$RgName" --name MyPublicIp-Vm1 --allocation-method Dynamic

# Create a network interface for VM1.
az network nic create --resource-group "$RgName" --vnet-name MyVnet --subnet MySubnet \
  --name MyNic-Vm1 --public-ip-address MyPublicIp-Vm1

# Add IP configurations that join the NIC to both back-end pools.
az network nic ip-config create --resource-group "$RgName" --name ipconfig2 \
  --nic-name MyNic-Vm1 --lb-name MyLoadBalancer --lb-address-pools BeContoso

az network nic ip-config create --resource-group "$RgName" --name ipconfig3 \
  --nic-name MyNic-Vm1 --lb-name MyLoadBalancer --lb-address-pools BeFabrikam

# Create VM1.
az vm create --resource-group "$RgName" --name MyVm1 --nics MyNic-Vm1 \
  --image UbuntuLTS --availability-set MyAvailabilitySet \
  --admin-username azureadmin --generate-ssh-keys

############### VM2 ###############

# Create a public IP for the second VM.
az network public-ip create --resource-group "$RgName" --name MyPublicIp-Vm2 --allocation-method Dynamic

# Create a network interface for VM2.
az network nic create --resource-group "$RgName" --vnet-name MyVnet --subnet MySubnet \
  --name MyNic-Vm2 --public-ip-address MyPublicIp-Vm2

# Add IP configurations that join the NIC to both back-end pools.
az network nic ip-config create --resource-group "$RgName" --name ipconfig2 \
  --nic-name MyNic-Vm2 --lb-name MyLoadBalancer --lb-address-pools BeContoso

az network nic ip-config create --resource-group "$RgName" --name ipconfig3 \
  --nic-name MyNic-Vm2 --lb-name MyLoadBalancer --lb-address-pools BeFabrikam

# Create VM2.
az vm create --resource-group "$RgName" --name MyVm2 --nics MyNic-Vm2 \
  --image UbuntuLTS --availability-set MyAvailabilitySet \
  --admin-username azureadmin --generate-ssh-keys

Azure virtual network

- create, change, or delete

-------------------------------------------------------------------------------------------------

# Build VNET1/S1 with load balancer LB1 (two inbound SSH NAT rules, an HTTP
# probe, two load-balancing rules) and two VMs in an availability set.
az network vnet create -g MyRG -n VNET1
# FIX: the flag is --vnet-name, not --vnet.
az network vnet subnet create -g MyRG --vnet-name VNET1 -n S1
az network public-ip create -g MyRG -n IP1 --allocation-method static --dns-name MyDNS

az network lb create -g MyRG -n LB1 --frontend-ip-name LBFE --backend-pool-name LBBE

# FIX: "--protocal" was a typo for --protocol (here and in the rules below).
az network lb inbound-nat-rule create -g MyRG -n SSH1 --lb-name LB1 --backend-port 22 \
     --frontend-port 21 --frontend-ip-name LBFE --protocol tcp
az network lb inbound-nat-rule create -g MyRG -n SSH2 --lb-name LB1 --backend-port 22 \
     --frontend-port 23 --frontend-ip-name LBFE --protocol tcp

az network lb probe create -g MyRG -n HTTPPROBE --lb-name LB1 --port 80 --protocol tcp

# FIX: the rules referenced "MyLB", but the load balancer created above is LB1.
az network lb rule create -g MyRG -n HTTP1 --lb-name LB1 --probe-name HTTPPROBE --protocol tcp \
     --frontend-ip-name LBFE  --frontend-port 80 \
     --backend-pool-name LBBE --backend-port 80

az network lb rule create -g MyRG -n HTTP2 --lb-name LB1 --probe-name HTTPPROBE --protocol tcp \
     --frontend-ip-name LBFE  --frontend-port 1234 \
     --backend-pool-name LBBE --backend-port 8000

az network lb show -g MyRG -n LB1

# FIX: the NAT rules were created as SSH1/SSH2 (resource names are matched as
# created, not lowercase "ssh1"), and each NIC needs its own inbound NAT rule —
# the original attached ssh1 to both NICs.
az network nic create -g MyRG -n NIC1 --subnet-name S1 --vnet-name VNET1 \
     --lb-address-pool-ids '/subscriptions/[...]/resourceGroups/MyRG/providers/Microsoft.Network/loadBalancers/LB1/backendAddressPools/LBBE' \
     --lb-nat-rule-ids '/subscriptions/[...]/resourceGroups/MyRG/providers/Microsoft.Network/loadBalancers/LB1/inboundNatRules/SSH1'

az network nic create -g MyRG -n NIC2 --subnet-name S1 --vnet-name VNET1 \
     --lb-address-pool-ids 'LBBE' \
     --lb-nat-rule-ids 'SSH2'

az vm availability-set create -g MyRG -n AS1

# FIX: az vm create takes --vnet-name and --nics (plural).
# NOTE(review): no --image is specified here; az vm create normally requires
# one — confirm against the intended CLI version.
az vm create -g MyRG -n VM1 --availability-set AS1 --vnet-name VNET1 --subnet S1 --nics NIC1
az vm create -g MyRG -n VM2 --availability-set AS1 --vnet-name VNET1 --subnet S1 --nics NIC2


Docker Commands and Best Practices

-----------------------------------------------------------------------------

Before we get into the best practices for using Docker, here’s a quick overview of the vocabulary you should know:
  • Layer: a set of read-only files or commands that describe how to set up the underlying system beneath the container.
  • Layers are built on top of each other, and each one represents a change to the filesystem.
  • Image: an immutable layer that forms the base of the container.
  • Container: an instance of the image that can be executed as an independent application.
  • The container has a mutable layer that lies on top of the image and that is separate from the underlying layers.
  • Registry: a storage and content delivery system used for distributing Docker images.
  • Repository: a collection of related Docker images, often different versions of the same application.


Developing with Docker Containers:

  • docker create [image]: Create a new container from a particular image.
  • docker login: Log into the Docker Hub repository.
  • docker pull [image]: Pull an image from the Docker Hub repository.
  • docker push [username/image]: Push an image to the Docker Hub repository.
  • docker search [term]: Search the Docker Hub repository for a particular term.
  • docker tag [source] [target]: Create a target tag or alias that refers to a source image.


Running Docker Containers

  • docker start [container]: Start a particular container.
  • docker stop [container]: Stop a particular container.
  • docker exec -ti [container] [command]: Run a shell command inside a particular container.
  • docker run -ti — image [image] [container] [command]:
  • Create and start a container at the same time, and then run a command inside it.
  • docker run -ti — rm — image [image] [container] [command]:
  • Create and start a container at the same time, run a command inside it,
  • and then remove the container after executing the command.
  • docker pause [container]: Pause all processes running within a particular container.


Using Docker Utilities:

  • docker history [image]: Display the history of a particular image.
  • docker images: List all of the images that are currently stored on the system.
  • docker inspect [object]: Display low-level information about a particular Docker object.
  • docker ps: List all of the containers that are currently running.
  • docker version: Display the version of Docker that is currently installed on the system.


Cleaning Up Your Docker Environment:

  • docker kill [container]: Kill a particular container.
  • docker kill $(docker ps -q): Kill all containers that are currently running.
  • docker rm [container]: Delete a particular container that is not currently running.
  • docker rm $(docker ps -a -q): Delete all containers that are not currently running.
Hopefully this guide will serve as your go to Docker cheat sheet.
If there is anything I missed, please let me know and I will happily add it.
Docker: All the Most Essential Commands in One Place

Linux Mysql Command Help and Examples

MySQL, the most popular Open Source SQL database management system, is developed, distributed, and supported by Oracle Corporation. This article describes the procedure to install MySQL and perform the initial configuration required as part of many third-party application installations.

Installation of MySQL and Configuring SSL

Download and install the required mysql related packages
yum install mysql-server perl-DBD-MySQL perl-DBI
Start mysql.
service mysqld start
Optionally set mysqld to start at boot
chkconfig mysqld on
Change mysql root password
/usr/bin/mysqladmin -u root password 'mysql'
Configure SSL for mysql server and the clients that will access the server
mkdir -p /etc/mysql/newcerts  
chown -R mysql:mysql /etc/mysql/newcerts
Create a certificate authority
cd /etc/mysql/newcerts  
openssl genrsa 2048 > ca-key.pem
NOTE This command will ask details of your certificate provider, provide a unique Common Name when asked
openssl req -new -x509 -nodes -days 1000 -key ca-key.pem > ca-cert.pem
Create a certificate for the server using the CA certificate generated above
NOTE Do not provide a password if asked in the next step
The Common Name used here must differ from the one used for the Certificate Authority above.
openssl req -newkey rsa:2048 -days 1000 -nodes -keyout server-key.pem > server-req.pem
openssl x509 -req -in server-req.pem -days 1000 -CA ca-cert.pem -CAkey ca-key.pem -set_serial 01 > server-cert.pem
Create a certificate for the clients using the same CA certificate
NOTE You must provide the details for the client that will connect to the server.
The Common Name used here must differ from the one used for the Certificate Authority and the Server certificate above.
openssl req -newkey rsa:2048 -days 1000 -nodes -keyout client-key.pem > client-req.pem 
openssl x509 -req -in client-req.pem -days 1000 -CA ca-cert.pem -CAkey ca-key.pem -set_serial 01 > client-cert.pem
Make sure following entries are present in /etc/my.cnf under the [mysqld] section
ssl
ssl-ca=/etc/mysql/newcerts/ca-cert.pem
ssl-cert=/etc/mysql/newcerts/server-cert.pem
ssl-key=/etc/mysql/newcerts/server-key.pem
Restart mysqld
service mysqld restart
Ensure that mysql root is authenticated with SSL and has correct permissions
NOTE : use your mysql root password here.
mysql -u root -p  
mysql> GRANT ALL ON *.* TO 'root'@'%' IDENTIFIED BY 'mysql' REQUIRE SSL;
mysql> quit
Test that SSL is working
login to the database
cd /etc/mysql/newcerts  
mysql --ssl-ca=ca-cert.pem --ssl-cert=client-cert.pem --ssl-key=client-key.pem -u root -p -v -v -v
Enter password: <password>
Check for the ciphers
mysql> SHOW STATUS LIKE 'Ssl_cipher';
————–  
SHOW STATUS LIKE ‘Ssl_cipher’  
————–  
+—————+——————–+  
| Variable_name | Value              |  
+—————+——————–+  
| Ssl_cipher    | DHE-RSA-AES256-SHA |
+—————+——————–+  
1 row in set (0.00 sec)  
mysql> show variables like '%ssl%';
————–  
show variables like ‘%%ssl%%’  
————–  
+—————+————————————-+  
| Variable_name | Value                               |  
+—————+————————————-+  
| have_openssl  | YES                                 |  
| have_ssl      | YES                                 | 
| ssl_ca        | /etc/mysql/newcerts/ca-cert.pem     |  
| ssl_capath    |                                     |  
| ssl_cert      | /etc/mysql/newcerts/server-cert.pem |  
| ssl_cipher    |                                     |  
| ssl_key       | /etc/mysql/newcerts/server-key.pem  |  
+—————+————————————-+  
7 rows in set (0.01 sec)  
mysql> quit

Configuring Remote access to MySQL Server
 On  MySQL server:
1. Install mysql-server package on server
# yum install mysql-server
2. Edit /etc/my.cnf file
# vi /etc/my.cnf
Search for [mysqld] section, If skip-networking line exists then comment out this line and add following entry:
bind-address=<server IP address>
3. Restart the mysql server:
# service mysqld restart
4. Connect to mysql server and provide access to remote system:
mysql> CREATE DATABASE <db-name>;
mysql> GRANT ALL ON <db-name>.* TO <username>@'<IP Address>' IDENTIFIED BY 'PASSWORD';
Where <username>@'<IP address>' is the address of the remote host machine, and
PASSWORD – is the password used to connect mysql  from remote system. replace “PASSWORD” with actual password.
4. Exit from mysql
mysql> exit
5. Open TCP port 3306 on server to provide access to remote  system:
# iptables -I INPUT -p tcp --dport 3306 -j ACCEPT
# service iptables save
On the Remote Client Server
1. Install mysql package
# yum install mysql
2. Execute following command to connect mysql:
# mysql -u <username> -h <Server IP address> -p

How to Reset MYSQL Database Root Password
 The MySQL root password can be set using the following procedure:
Stop the MySQL service:
 # service mysqld stop
Stopping MySQL:                                            [  OK  ]
 Start MySQL with:
# /usr/bin/mysqld_safe --skip-grant-tables &
On Red Hat Enterprise Linux 3, mysqld_safe was called safe_mysqld:
# /usr/bin/safe_mysqld --skip-grant-tables &
 Note: mysql_safe is a shell script which invokes mysqld, but additionally traps any forceful terminations of the MySQL server and avoids any database corruption.
Change the password of the root user:
 # mysql -u root mysql
mysql> UPDATE user SET Password=PASSWORD('new_password') WHERE user='root';
mysql> FLUSH PRIVILEGES;
mysql> exit;
 Restart mysqld using mysqladmin to ensure that the service shuts down successfully (as it was started manually in step 2).
# mysqladmin shutdown
 Restart the MySQL service as per normal:

# service mysqld start  


Mysql DB-Backup

--------------------------------------------------------------
#!/bin/bash
# Dump one MySQL schema to a dated .sql file, checksum the backup directory,
# compress it, and (optionally) ship the archive to a backup host.
set -euo pipefail

# database credentials
# NOTE(review): --password on the command line is visible in `ps` output;
# prefer ~/.my.cnf or --defaults-extra-file for production use.
DATABASEHOST="host"
DATABASEUSER="user"
DATABASEPASSWORD="password"
DATABASESCHEMA="database"
DATABASEENV="backup"

# Local directory of mysqldump file
LOCALDIR=/home/user/db-bkp

# Temporary directory for compressed file
TEMPDIR=/home/user/tmp

# Remote Directory for backups.
REMOTEDIR=/home/user/db-bkp

# Backup host to login to
BACKUPHOST="otherhost"

# FIX: take the date stamp once. The original evaluated $(date +%Y%m%d) in
# three separate places, producing mismatched file names (and a failing final
# rm) if the script straddles midnight.
STAMP=$(date +%Y%m%d)
DUMPFILE="$LOCALDIR/${STAMP}_bkp_${DATABASESCHEMA}.sql"
#--- end config

echo "$(date +%H:%M)"
echo "Creating the MySQL dump"
# --single-transaction gives a consistent snapshot without locking tables.
mysqldump --host="$DATABASEHOST" --user="$DATABASEUSER" --password="$DATABASEPASSWORD" \
  --single-transaction "$DATABASESCHEMA" > "$DUMPFILE"

#echo "Generating md5sum"
md5sum "$LOCALDIR"/* > "$LOCALDIR"/checklist.chk

#echo "Compressing the dump and checklist"
tar -cvzf "$TEMPDIR/${STAMP}_${DATABASEENV}_${DATABASESCHEMA}.tar.gz" "$LOCALDIR"/*

#echo "Sending the compressed file to d2"
## scp "$TEMPDIR"/"$DATABASESCHEMA".tar.gz "$BACKUPHOST":"$REMOTEDIR"

#echo "Removing generated files"
#rm "$LOCALDIR"/checklist.chk > /dev/null 2>&1

#rm "$TEMPDIR"/"$DATABASESCHEMA".tar.gz > /dev/null 2>&1

# Remove the uncompressed dump; the compressed archive in $TEMPDIR is kept.
rm "$DUMPFILE"
echo "$(date +%H:%M)"



Linux (Redhat, Centos) iptables and firewalld Commands
-----------------------------------------------------------------------------------------------

Example with details:

FIREWALLD



#!/bin/bash
# Open the standard service ports in firewalld's public zone, reload, then add
# source-restricted rich rules for a management /24 subnet.

# Port/protocol pairs to open permanently in the public zone:
#   80/tcp HTTP, 22/tcp SSH, 443/tcp HTTPS,
#   8443/tcp UD Scanner / Ansible / HP SiteScope, 8888/tcp JDBC,
#   5986/tcp Ansible, 3306/tcp MySQL, 57398/tcp McAfee,
#   3164/tcp + 314/tcp SyslogNG, 123/udp NTP, 1194/tcp OpenVPN, 323/udp chronyd.
# NOTE(review): 314/tcp looks like it may be a typo for 514 (syslog), and 3164
# is the BSD-syslog RFC number rather than a common port — confirm before
# relying on these two entries.
ports=(
  80/tcp
  22/tcp
  443/tcp
  8443/tcp
  8888/tcp
  5986/tcp
  3306/tcp
  57398/tcp
  3164/tcp
  314/tcp
  123/udp
  1194/tcp
  323/udp
)

for port in "${ports[@]}"; do
  sudo firewall-cmd --zone=public --add-port="$port" --permanent
done

# Apply the permanent configuration and show the resulting zone.
sudo firewall-cmd --reload

sudo firewall-cmd --list-all

# Rich rules: accept selected ports only from the management subnet.
subnet="xxx.xxx.xxx.xxx/24"
for spec in tcp:22 udp:123 tcp:80 tcp:443 tcp:8443; do
  proto=${spec%%:*}
  port=${spec##*:}
  sudo firewall-cmd --permanent --zone=public --add-rich-rule=' rule family="ipv4" source address="'"$subnet"'" port protocol="'"$proto"'" port="'"$port"'" accept'
done



#!/bin/bash

## Default firewall Rules applied on VM
## Version 1.0 ##
## Author: SKN ##

# Start firewalld now and enable it at boot.
sudo systemctl start firewalld
sudo systemctl enable firewalld

sudo firewall-cmd --state
sudo firewall-cmd --reload
sudo systemctl status firewalld

# Inspect zone configuration.
sudo firewall-cmd --get-zones
sudo firewall-cmd --get-default-zone
sudo firewall-cmd --get-active-zones
sudo firewall-cmd --list-all
sudo firewall-cmd --list-services --zone=public

# Permanently allow the baseline services, then reload to apply them.
sudo firewall-cmd --zone=public --add-service=http --permanent
sudo firewall-cmd --zone=public --add-service=https --permanent
sudo firewall-cmd --zone=public --add-service=ssh --permanent
sudo firewall-cmd --zone=public --add-service=chrony --permanent
sudo firewall-cmd --zone=public --add-service=crond --permanent
sudo firewall-cmd --zone=public --add-service=syslog --permanent
sudo firewall-cmd --reload


## Get all Zone Details
# FIX: quote "$z", and run the listing with sudo like every other
# firewall-cmd call in this script.
for z in $(sudo firewall-cmd --get-zones)
do
  echo "Services allowed in $z zone: $(sudo firewall-cmd --list-services --zone="$z")"
done

# Raw iptables baseline.
# NOTE(review): manipulating iptables directly while firewalld is running
# conflicts with firewalld's own rule management — confirm this is intentional.
# FIX: iptables requires root; the original omitted sudo here while using it
# for every firewall-cmd call above.
IPT='sudo iptables'
$IPT -F
$IPT -P INPUT DROP
$IPT -P FORWARD DROP
$IPT -P OUTPUT ACCEPT


# Accept on localhost
$IPT -A INPUT -i lo -j ACCEPT
$IPT -A OUTPUT -o lo -j ACCEPT


# Allow established sessions to receive traffic and ssh
$IPT -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
$IPT -I INPUT -p tcp --dport 22 -j ACCEPT
# FIX: iptables-save only prints to stdout; it persists nothing by itself.
# Redirect to the distro's rules file if persistence is wanted, e.g.
#   sudo iptables-save | sudo tee /etc/sysconfig/iptables
sudo iptables-save
$IPT -L -v




                                    iptables - How-to - With Examples 

     ------------------------------------------------------------------------------------------------------



Enable the service on persistent reboot.
chkconfig iptables on
How to Start and Stop service
/etc/init.d/iptables start
/etc/init.d/iptables stop
/etc/init.d/iptables status

Following are the possible special values that you can specify in the target.
ACCEPT – Firewall will accept the packet.
DROP – Firewall will drop the packet.
QUEUE – Firewall will pass the packet to the userspace.
RETURN – Firewall will stop executing the next set of rules in the current chain for this packet.
The control will be returned to the calling chain.

List the rules
# iptables -L -n -v
The output of the iptables --list command contains the following fields:
num – Rule number within the particular chain
target – Special target variable that we discussed above
prot – Protocols. tcp, udp, icmp, etc.,
opt – Special options for that specific rule.
source – Source ip-address of the packet
destination – Destination ip-address for the packet

Before you start building new set of rules, you might want to clean-up all the default rules, and existing rules.
# iptables -F
(or)
# iptables --flush
The default chain policy is ACCEPT. Change this to DROP for all INPUT, FORWARD,
and OUTPUT chains as shown below.
#iptables -P INPUT DROP
#iptables -P FORWARD DROP
#iptables -P OUTPUT DROP

# Example default-deny iptables rule set (pairs with the DROP policies above).
# FIX: the bare "Incoming"/"Outgoing"/"Logs" section headers were not
# commented in the original and would abort the snippet if run as a script.

# Local loop
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT

# Connections already established
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# PING (ICMP type 8 = echo-request)
iptables -A INPUT -p icmp --icmp-type 8 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -p icmp --icmp-type 8 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
# DNS
# UDP
iptables -A INPUT -i eth0 -p udp --sport 53 -m state --state ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p udp --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
# TCP
iptables -A INPUT -i eth0 -p tcp --sport 53 -m state --state ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT

# SSH
# Incoming
iptables -A INPUT -i eth0 -p tcp --dport 22 -m state --state NEW,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --sport 22 -m state --state ESTABLISHED -j ACCEPT
# Outgoing
iptables -A INPUT -i eth0 -p tcp --sport 22 -m state --state ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --dport 22 -m state --state NEW,ESTABLISHED -j ACCEPT

# HTTP
# Incoming
iptables -A INPUT -i eth0 -p tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --sport 80 -m state --state ESTABLISHED -j ACCEPT
# Outgoing
iptables -A INPUT -i eth0 -p tcp --sport 80 -m state --state ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT

# HTTPS
# Incoming
iptables -A INPUT -i eth0 -p tcp --dport 443 -m state --state NEW,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --sport 443 -m state --state ESTABLISHED -j ACCEPT
# Outgoing
iptables -A INPUT -i eth0 -p tcp --sport 443 -m state --state ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --dport 443 -m state --state NEW,ESTABLISHED -j ACCEPT

# FTP
# Incoming
iptables -A INPUT -i eth0 -p tcp --dport 21 -m state --state NEW,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --sport 21 -m state --state ESTABLISHED -j ACCEPT
# Logs: rate-limited logging of packets that reached this point
iptables -A INPUT -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 4

# At the end, DROP everything else not matched by the rules above.
iptables -A INPUT -j DROP
iptables -A OUTPUT -j DROP
iptables -A FORWARD -j DROP
-- eth0 should be replaced with respective device.
-- Only use required rules on your device.

Table 11-3. DSCP target options


Option

--set-dscp


Example

iptables -A OUTPUT -p tcp --dport 53 -j DSCP --set-dscp 1


Explanation

This sets the DSCP value to the specified value. The values can be set either via class, see below, or with the --set-dscp, which takes either an integer value, or a hex value.


Option

--set-dscp-class


Example

iptables -A OUTPUT -p tcp --dport 53 -j DSCP --set-dscp-class AF21


Explanation

This sets the DSCP field according to a predefined DiffServ class. Some of the possible values are EF, BE and the CSxx and AFxx values available. You can find more information at Implementing Quality of Service Policies with DSCP site. Do note that the --set-dscp-class and --set-dscp commands are mutually exclusive, which means you can not use both of them in the same command!

iptables -t mangle -A OUTPUT -p udp -m udp --sport <SRCPRT> --dport 53 -j DSCP --set-dscp-class af21

Please use the following command to allow outgoing DNS requests.
iptables -A OUTPUT -p udp -m udp --dport 53 -j ACCEPT
iptables -A OUTPUT -p udp -m udp --sport 53 -j DSCP --set-dscp 0x10

The iptables firewall can modify packets in the mangle table.
For locally generated traffic, use the OUTPUT or POSTROUTING chain.
Some possible examples are:
# iptables -t mangle -A OUTPUT -p udp --dport 53 -j DSCP --set-dscp-class AF11
# iptables -t mangle -A POSTROUTING -p udp -m udp --sport 8000:8009 -j DSCP --set-dscp 0x0a
We can make use of the mangle table - which allows us to modify packets before they leave the system.


iptables -t mangle -A OUTPUT -j DSCP --set-dscp-class AF21 -m comment --comment "set dscp class to AF21 for all outbound traffic"


FIREWALLD - The firewalld-cmd utility:

firewalld has the following advantages over iptables :
1. Unlike the iptables command, the firewall-cmd command does not restart the firewall and disrupt established TCP connections.
2. firewalld supports dynamic zones.
3. firewalld supports D-Bus for better integration with services that depend on firewall configuration.
Configuration options

The firewalld service has two types of configuration options:
1. Runtime: Changes to firewall settings take effect immediately but are not permanent. Changes made in runtime configuration mode are lost when the firewalld service is restarted.
2. Permanent: Changes to firewall settings are written to configuration files. These changes are applied when the firewalld service restarts.
Configuration files

Configuration files for firewalld exist in two directories:
/usr/lib/firewalld: Contains default configuration files. Do not make changes to these files. An upgrade of the firewalld package overwrites this directory.
/etc/firewalld: Changes to the default configuration files are stored in this directory. Files in this directory override the default configuration files.
firewalld zones

The firewalld service allows you to separate networks into different zones based on the level of trust you want to place on the devices and traffic within a specific network. For each zone you can define the following features:
Services: Predefined or custom services to trust. Trusted services are a combination of ports and protocols that are accessible from other systems and networks.
Ports: Additional ports or port ranges and associated protocols that are accessible from other systems and networks.
Masquerading: Translate IPv4 addresses to a single external address. With masquerading enabled, addresses of a private network are mapped to and hidden behind a public address.
Port Forwarding: Forward inbound network traffic from a specific port or port range to an alternative port on the local system, or to a port on another IPv4 address.
ICMP Filter: Block selected Internet Control Message Protocol messages.
Rich Rules: Extend existing firewalld rules to include additional source and destination addresses and logging and auditing actions.
Interfaces: Network interfaces bound to the zone. The zone for an interface is specified with the ZONE=option in the /etc/sysconfig/network-scripts/ifcfg file. If the option is missing, the interface is bound to the default zone.
Predefined firewalld Zones

The firewalld software package includes a set of predefined network zones in the following directory:

# ls -lrt /usr/lib/firewalld/zones/

total 36

-rw-r----- 1 root root 342 Sep 15 2015 work.xml

-rw-r----- 1 root root 162 Sep 15 2015 trusted.xml

-rw-r----- 1 root root 315 Sep 15 2015 public.xml

-rw-r----- 1 root root 415 Sep 15 2015 internal.xml

-rw-r----- 1 root root 400 Sep 15 2015 home.xml

-rw-r----- 1 root root 304 Sep 15 2015 external.xml

-rw-r----- 1 root root 291 Sep 15 2015 drop.xml

-rw-r----- 1 root root 293 Sep 15 2015 dmz.xml

-rw-r----- 1 root root 299 Sep 15 2015 block.xml

The zone files contain preset settings, which can be applied to a network interface. For example:

# grep -i service /usr/lib/firewalld/zones/public.xml

<service name="ssh"/>

<service name="dhcpv6-client"/>

In this example, network interfaces bound to the public zone trust only two services, ssh and dhcpv6-client.

A brief explanation of each zone follows:
drop: Any incoming network packets are dropped, there is no reply. Only outgoing
network connections are possible.
block: Any incoming network connections are rejected with an icmp-host- prohibited message for IPv4 and icmp6-adm-prohibited for IPv6. Only network connections initiated from within the system are possible.
home: For use in home areas. You mostly trust the other computers on networks to not harm your computer. Only selected incoming connections are accepted.
public: For use in public areas. You do not trust the other computers on the network to not harm your computer. Only selected incoming connections are accepted.
work: For use in work areas. You mostly trust the other computers on networks to not harm your computer. Only selected incoming connections are accepted.
dmz: For computers in your demilitarized zone that are publicly accessible with limited access to your internal network. Only selected incoming connections are accepted.
external: For use on external networks with masquerading enabled especially for routers. You do not trust the other computers on the network to not harm your computer. Only selected incoming connections are accepted.
internal: For use on internal networks. You mostly trust the other computers on the networks to not harm your computer. Only selected incoming connections are accepted.
trusted: All network connections are accepted.
Setting the Default firewalld Zone

After an initial installation, the public zone is the default zone as specified in the configuration file, /etc/firewalld/firewalld.conf.

# grep -i defaultzone /etc/firewalld/firewalld.conf

DefaultZone=public

Network interfaces are bound to the default zone unless specified with ZONE=[zone] in the ifcfg file. The following command shows the interfaces that are bound to the public zone:

# firewall-cmd --get-active-zone

public

interfaces: eth0 eth1

You can use the firewall-cmd command to change the default zone:

# firewall-cmd --set-default-zone=work

success

You can also use the firewall-config GUI to change the default zone. From the menu bar, select Options->Change Default Zone, and then select a zone from a pop-up list.
firewalld Services

– A firewalld service is a combination of local ports and protocols and destination addresses.
– A firewalld service can also include Netfilter kernel modules that are automatically loaded when a service is enabled.
– The firewalld software package includes a set of predefined services in the following directory:

# ls -lrt /usr/lib/firewalld/services/

total 36

-rw-r----- 1 root root 342 Sep 15 2015 work.xml

-rw-r----- 1 root root 162 Sep 15 2015 trusted.xml

-rw-r----- 1 root root 315 Sep 15 2015 public.xml

-rw-r----- 1 root root 415 Sep 15 2015 internal.xml

-rw-r----- 1 root root 400 Sep 15 2015 home.xml

-rw-r----- 1 root root 304 Sep 15 2015 external.xml

-rw-r----- 1 root root 291 Sep 15 2015 drop.xml

-rw-r----- 1 root root 293 Sep 15 2015 dmz.xml

-rw-r----- 1 root root 299 Sep 15 2015 block.xml

– Services can be enabled for a zone in Runtime mode.
– Service definitions can only be edited in Permanent mode.
Start firewalld

To start firewalld:

# systemctl start firewalld

To ensure firewalld starts at boot time:

# systemctl enable firewalld

To check if firewalld is running:

# systemctl status firewalld

# firewall-cmd --state

Three methods to configure the firewalld service:
– firewall-cmd : Command-line interface
– firewall-config : Graphical user interface
– Edit various XML configuration files.

CentOS / RHEL 7/8 : How to start / Stop Firewalld

The command-line tool firewall-cmd is part of the firewalld application, which is installed by default. To get help on the firewall-cmd command:

# firewall-cmd --help

The firewall-cmd command offers categories of options such as General, Status, Permanent, Zone, IcmpType, Service, Adapt and Query Zones, Direct, Lockdown, Lockdown Whitelist, and Panic. To list information for all zones:

# firewall-cmd --list-all-zones

public (default, active)

interfaces: eth0 eth1

sources:

services: dhcpv6-client ssh


ports:

...

To permit access by HTTP clients for the public zone:

# firewall-cmd --zone=public --add-service=http

success

To list services that are allowed for the public zone:

# firewall-cmd --zone=public --list-services

dhcpv6-client http ssh


Using this command only changes the Runtime configuration and does not update the configuration files.
The configuration changes made in Runtime configuration mode are lost when the firewalld service is restarted:

# systemctl restart firewalld

# firewall-cmd --zone=public --list-services

dhcpv6-client ssh

To make changes permanent, use the --permanent option. Example:

# firewall-cmd --permanent --zone=public --add-service=http

success

Changes made in Permanent configuration mode are not implemented immediately. However, changes made in Permanent configuration are written to configuration files. Restarting the firewalld service reads the configuration files and implements the changes. Example:

# systemctl restart firewalld

# firewall-cmd --zone=public --list-services

dhcpv6-client http ssh


Set up Magento 2 with Redis, Varnish and Nginx as SSL termination

Table of Contents

Login to your VPS via SSH


Update the system and install necessary packages


Install MariaDB 10.6


Install PHP 7.0, composer and all required PHP modules


Install Magento 2 from Github


Install and configure Nginx


Install and configure Varnish


Install and configure Redis caching


Further Optimizations
Login to your VPS via SSH

ssh my_sudo_user@my_server
Update the system and install necessary packages

sudo apt-get update && sudo apt-get -y upgrade

sudo apt-get -y install curl nano git



Install MariaDB 10.6

Install the latest MariaDB server from the official Ubuntu repositories:

sudo apt-get install -y mariadb-server-10.6

When the installation is complete, run the following command to secure your installation:

mysql_secure_installation

Next, we need to create a database for our Magento installation.

mysql -uroot -p

MariaDB [(none)]> CREATE DATABASE magento;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON magento.* TO 'magento'@'localhost' IDENTIFIED BY 'my_strong_password';

MariaDB [(none)]> FLUSH PRIVILEGES;

MariaDB [(none)]> \q
Install PHP 7.4, composer and all required PHP modules

To install the latest stable version of PHP 7.4 and all necessary modules, run:

sudo apt-get -y install php-fpm php-cli php-gd php-imagick php-mysql php-mcrypt php-pear php-curl php-intl php-xsl php-zip php-mbstring


Change few default PHP settings:

sudo sed -i "s/memory_limit = .*/memory_limit = 256M/" /etc/php/7.4/fpm/php.ini

sudo sed -i "s/upload_max_filesize = .*/upload_max_filesize = 128M/" /etc/php/7.4/fpm/php.ini

sudo sed -i "s/zlib.output_compression = .*/zlib.output_compression = on/" /etc/php/7.4/fpm/php.ini

sudo sed -i "s/max_execution_time = .*/max_execution_time = 18000/" /etc/php/7.4/fpm/php.ini



The composer is a dependency manager for PHP with which you can install packages. The composer will pull in all the required libraries and dependencies you need for your project.

curl -sS https://getcomposer.org/installer | php

sudo mv composer.phar /usr/local/bin/composer

Install Magento 2 from Github

Clone the Magento repository to the ~/myMagentoSite.com directory using the following command:

sudo git clone https://github.com/magento/magento2.git /var/www/myMagentoSite.com

Get the latest stable release, at the time of the writing it’s Magento 2.4.4:

cd /var/www/myMagentoSite.com

sudo git checkout $(git describe --tags $(git rev-list --tags --max-count=1))

Run composer to install all Magento dependencies:

sudo composer install

To continue with the installation you can either use the installation wizard or the command line, in this guide we will use the latter.

sudo bin/magento setup:install \

--base-url=http://myMagentoSite.com/ \

--db-host=localhost \

--db-name=magento \

--db-user=magento \

--db-password=my_strong_password \

--admin-firstname=First \

--admin-lastname=Last \

--admin-email=user@myMagentoSite.com \

--admin-user=admin \

--admin-password=my_strong_password123 \

--language=en_US \

--currency=USD \

--timezone=America/Chicago \

--use-rewrites=1


If the installation is successful you will see something like below:

[SUCCESS]: Magento installation complete.

[SUCCESS]: Magento Admin URI: /admin_mejj1n


Run the crontab command to create a cronjob

crontab -u www-data -e

and add the following line:

* * * * * /usr/bin/php /var/www/myMagentoSite.com/bin/magento cron:run | grep -v "Ran jobs by schedule" >> /var/www/myMagentoSite.com/var/log/magento.cron.log



Finally, set the correct permissions:

sudo chown -R www-data: /var/www/myMagentoSite.com
Install and configure Nginx

Install Nginx from the official Ubuntu repositories::

sudo apt-get -y install nginx

Create a new Nginx server block with the following content:

sudo nano /etc/nginx/sites-available/myMagentoSite.com

upstream fastcgi_backend {

server unix:/run/php/php7.4-fpm.sock;

}

server {

server_name myMagentoSite.com www.myMagentoSite.com;

listen 80;

set $MAGE_ROOT /var/www/myMagentoSite.com;

set $MAGE_MODE developer; # or production

access_log /var/log/nginx/myMagentoSite.com-access.log;

error_log /var/log/nginx/myMagentoSite.com-error.log;

include /var/www/myMagentoSite.com/nginx.conf.sample;

}


Activate the server block by creating a symbolic link :

sudo ln -s /etc/nginx/sites-available/myMagentoSite.com /etc/nginx/sites-enabled/myMagentoSite.com

Delete the default configuration:

sudo rm -f /etc/nginx/sites-enabled/default

Test the Nginx configuration and restart nginx:

sudo nginx -t

sudo service nginx restart


You should be now able to login to your Magento back-end by going to http://myMagentoSite.com/admin_mejj1n using the information you set when running the bin/magento setup:install .
Install and configure Varnish

Installing Varnish is as simple as running the following command:

sudo apt-get install varnish

From your Magento Admin dashboard click on the STORES link (left sidebar) -> Configuration -> ADVANCED -> System -> Full Page Cache
Unselected Use system value and from the Caching Application list, select Varnish Cache (Recommended), save the configuration, click on the Varnish Configuration link and click on the Export VCL for Varnish 4 button. The varnish.vcl file which we will use will be exported in the directory /var/www/myMagentoSite.com/var/.

Flush the Magento cache with:

sudo php bin/magento cache:flush

Delete the /etc/varnish/default.vcl and symlink it to the exported varnish configuration.

sudo rm -f /etc/varnish/default.vcl

sudo ln -sf /var/www/myMagentoSite.com/var/varnish.vcl /etc/varnish/default.vcl





To change the varnish port from 6081 to 80, we need to edit the systemd service configuration.

Create a new customexec.conf file

sudo mkdir -p /etc/systemd/system/varnish.service.d

sudo nano /etc/systemd/system/varnish.service.d/customexec.conf


paste the following:

[Service]

ExecStart=

ExecStart=/usr/sbin/varnishd -j unix,user=vcache -F -a :80 -T localhost:6082 -f /etc/varnish/default.vcl -S /etc/varnish/secret -s malloc,256m


and reload systemd units

sudo systemctl daemon-reload

Now we need to change Nginx listening port from 80 to 8080 and enable Nginx SSL termination with HTTP2, to do that open the Nginx configuration file and change it as follows:

sudo nano /etc/nginx/sites-available/myMagentoSite.com

upstream fastcgi_backend {

server unix:/run/php/php7.4-fpm.sock;

}

server {

server_name myMagentoSite.com www.myMagentoSite.com;

listen 8080;

set $MAGE_ROOT /var/www/myMagentoSite.com;

set $MAGE_MODE production; # or developer

access_log /var/log/nginx/myMagentoSite.com-access.log;

error_log /var/log/nginx/myMagentoSite.com-error.log;


include /var/www/myMagentoSite.com/nginx.conf.sample;

}

server {

listen 443 ssl http2;

server_name myMagentoSite.com www.myMagentoSite.com;

ssl_certificate /etc/ssl/certs/ssl-cert-snakeoil.pem; # change with your SSL cert

ssl_certificate_key /etc/ssl/private/ssl-cert-snakeoil.key; # change with your SSL key

ssl_protocols TLSv1 TLSv1.1 TLSv1.2;

ssl_ciphers 'AES128+EECDH:AES128+EDH:!aNULL';

ssl_session_cache shared:SSL:10m;

ssl_session_timeout 24h;

keepalive_timeout 300s;


location / {

proxy_pass http://127.0.0.1;

proxy_set_header Host $http_host;

proxy_set_header X-Forwarded-Host $http_host;

proxy_set_header X-Real-IP $remote_addr;

proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

proxy_set_header Ssl-Offloaded "1";

proxy_set_header X-Forwarded-Proto https;

proxy_set_header X-Forwarded-Port 443;

#proxy_hide_header X-Varnish;

#proxy_hide_header Via;

proxy_set_header X-Forwarded-Proto $scheme;


}

}


If you don’t already have an SSL certificate, you can purchase a trusted SSL certificate.

Restart Varnish and Nginx:

sudo systemctl restart nginx

sudo systemctl restart varnish


Change the base URL to https and flush the cache

sudo bin/magento setup:store-config:set --base-url="https://myMagentoSite.com"

sudo php bin/magento cache:flush





If everything is setup correctly now you should be able to login to your Magento back-end by going to https://myMagentoSite.com/admin_mejj1n.
Install and configure Redis caching

Redis is a key-value in memory data store and we will use it to replace the default Magento 2 Zend_Cache_Backend_File backend cache. Install Redis by running the following command:

apt-get install php-redis redis-server

To configure your Magento installation to use Redis for session storage open the app/etc/env.php file and change/add the following:

sudo nano /var/www/myMagentoSite.com/app/etc/env.php

change:

'session' =>

array (

'save' => 'files',

),


with:

'session' =>

array (

'save' => 'redis',

'redis' =>

array (

'host' => '127.0.0.1',

'port' => '6379',

'password' => '',

'timeout' => '2.5',

'persistent_identifier' => '',

'database' => '0',

'compression_threshold' => '2048',

'compression_library' => 'gzip',

'log_level' => '1',

'max_concurrency' => '6',

'break_after_frontend' => '5',

'break_after_adminhtml' => '30',

'first_lifetime' => '600',

'bot_first_lifetime' => '60',

'bot_lifetime' => '7200',

'disable_locking' => '0',

'min_lifetime' => '60',

'max_lifetime' => '2592000'

)

),


and to use Redis for page caching add:

'cache' =>

array(

'frontend' =>

array(

'default' =>

array(

'backend' => 'Cm_Cache_Backend_Redis',

'backend_options' =>

array(

'server' => '127.0.0.1',

'port' => '6379'

),

),

'page_cache' =>

array(

'backend' => 'Cm_Cache_Backend_Redis',

'backend_options' =>

array(

'server' => '127.0.0.1',

'port' => '6379',

'database' => '1',

'compress_data' => '0'

)

)

)

),



Finally flush the cache again:

sudo php bin/magento cache:flush

Further Optimizations

To further optimize your Magento installation from you Magento admin dashboard:

1. Go to STORES -> Configuration -> CATALOG -> Catalog -> Use Flat Catalog Category, select Yes and click Save Config.
2. Go to STORES -> Configuration -> ADVANCED -> Developer -> JavaScript Settings and set both Merge JavaScript Files and Minify JavaScript Files to Yes and click Save Config..
3. Go to STORES -> Configuration -> ADVANCED -> Developer -> CSS Settings and set both Merge CSS Files and Minify CSS Files to Yes and click Save Config.
4. Consider using a CDN – Content Delivery Network

Do not forget to flush the cache:

sudo php bin/magento cache:flush


That’s it. You have successfully installed Magento 2 with Redis as a session storage and page caching, Varnish as a full page caching and Nginx as SSL termination on your Ubuntu 16.04 VPS. For more information about how to manage your Magento installation, please refer to the official Magento documentation.



Of course, you don’t have to do any of this if you use one of our Magento VPS Hosting services, in which case you can simply ask our expert Linux admins to setup this for you. They are available 24×7 and will take care of your request immediately.

PS. If you liked this post please share it with your friends on the social networks using the buttons on the left or simply leave a reply below. Thanks.


RED HAT SATELLITE AND SPACEWALK

How to Fix Error: do not have any GPG public keys installed in Redhat Linux
rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release


Register system to your spacewalk 
rhnreg_ks --sslCACert=/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT  --serverUrl=https://spacewalk7.sknlab.com/XMLRPC --activationkey=1-fe6b9cb939a39ae7268f1e29ed37fa2d


subscription-manager Unable to verify server's identity: certificate verify failed

[root@web-ser1 tmp]# subscription-manager register --username satadmin --password ***&****
Registering to: dcplabsat8.local:8443/rhsm
Unable to verify server's identity: certificate verify failed

Fix =

Update following config to point correct URL.

[root@web-ser1 tmp]# vim /etc/rhsm/rhsm.conf

# Server hostname:
hostname = dcplabsat8.local

If you feel, conf file mess up then simply remove following package & install again. 
All should be fine. 

# yum remove python-rhsm subscription-manager
# yum clean all
# yum install python-rhsm subscription-manager

Adding system to satellite server 

subscription-manager config --rhsm.baseurl=https://dcplabsat8.local/pulp/content --server.hostname=dcplabsat8.local

curl -sS --insecure 'https://dcplabsat8.local/register?activation_keys=My_Activation_Key' -H 'Authorization: Bearer eyJhbGciOiJIUzI1NiJ9.eyJ1c2VyX2lkIjo0LCJpYXQiOjE3MDA3ODgxMzAsImp0aSI6IjViNWQ3M2FmM2ZjMTBlZTJhNDE2ZDc5MWIzMDFkOTFlYmM1MTBiZjBhODIyYmRiYmUxOWY1MmI3NzhlYTBlYzAiLCJleHAiOjE3MDA4MDI1MzAsInNjb3BlIjoicmVnaXN0cmF0aW9uI2dsb2JhbCByZWdpc3RyYXRpb24jaG9zdCJ9.FhhEGHgtqfwv7N9kjcJm-kzTsfGkdwdQDhJkhhz_9uc' | bash

subscription-manager register

yum install rhn-client-tools rhn-check rhn-setup rhnsd m2crypto yum-rhn-plugin

wget -q -O /usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT http://spacewalk7.sknlab.com/pub/RHN-ORG-TRUSTED-SSL-CERT


rhnreg_ks --sslCACert=/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT  --serverUrl=https://spacewalk7.sknlab.com/XMLRPC --activationkey=1-fe6b9cb939a39ae7268f1e29ed37fa2d

HOW SYNC REPOSITORY UNDER CHANNELS 

spacewalk-repo-sync --list
spacewalk-repo-sync --channel oraclelinux9-x86_64-addons --sync-kickstart

SYNC INCLUDING ERRATA

spacewalk-repo-sync --channel oraclelinux9-x86_64-appstream --sync-kickstart --force-errata
 spacewalk-repo-sync --channel oraclelinux9-x86_64 --sync-kickstart --no-errata 

INSTALL SPACEWALK SERVER
 
      yum install -y epel-release yum-plugin-tmprepo
      yum install -y spacewalk-repo --tmprepo=https://copr-be.cloud.fedoraproject.org/results/%40spacewalkproject/spacewalk-2.9/epel-7-x86_64/repodata/repomd.xml --nogpg
      yum -y install spacewalk-setup-postgresql
      firewall-cmd --permanent --add-service={http,https}
      firewall-cmd --permanent --add-port={5222,5269}/tcp
      firewall-cmd --permanent --add-port=69/udp
      firewall-cmd --reload
      yum -y install spacewalk-setup-postgresql spacewalk-postgresql
      cat << 'EOF' | tee /root/spacewalk-answers
     admin-email = admin@sknlab.com
     ssl-set-org = sknlab
     ssl-set-org-unit = IT
     ssl-set-city = Bangalore
     ssl-set-state = Karnataka
     ssl-set-country = India
     ssl-password = spacewalk
     ssl-set-email = admin@sknlab.com
     ssl-config-sslvhost = Y
     db-backend=postgresql
     db-name=spaceschema
     db-user=sknspace7
     db-password=Welcome123
     db-host=localhost
     db-port=5432
     enable-tftp=Y
     EOF
   
     spacewalk-setup --answer-file=/root/spacewalk-answers


     echo "alias spacecmd='spacecmd -q'" >> ~/.bashrc
     exit
     cat << 'EOF' > ~/.spacecmd/config
     [spacecmd] 
     server=spacewalk7.sknlab.com
    username=sknspace7
     password=Welcome123
     EOF

    yum install httpd
    systemctl status httpd
    systemctl enable httpd
    systemctl start httpd

check the attached channels

spacewalk-report  channel
rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY



check available channels

spacewalk-common-channels  -l

HOW TO SET UP A NEW SATELLITE SERVER

subscription-manager register

subscription-manager list --all --available --matches 'Red Hat Satellite Infrastructure Subscription'

subscription-manager list --all --available

subscription-manager attach --pool=8a85f9a17bcaabb5017bff1c5aa15071

subscription-manager repos --disable "*"


subscription-manager repos --enable=rhel-8-for-x86_64-baseos-rpms --enable=rhel-8-for-x86_64-appstream-rpms --enable=satellite-6.14-for-rhel-8-x86_64-rpms --enable=satellite-maintenance-6.14-for-rhel-8-x86_64-rpms

dnf module enable satellite:el8
systemctl enable --now chronyd
rpm --rebuilddb
dnf clean packages
yum update "*dnf*" libsolv
yum update python39\*

satellite-installer --scenario satellite --foreman-initial-organization "My_Organization" --foreman-initial-location "My_Location" --foreman-initial-admin-username satadmin --foreman-initial-admin-password password

tail /var/log/foreman-installer/satellite.log -f

hammer host-registration generate-command --activation-keys "My_Activation_Key" --insecure true

hammer repository-set enable --basearch="x86_64" --name "Red Hat Satellite Client 6 for RHEL 8 x86_64 (RPMs)" --organization "My_Organization" --product "Red Hat Enterprise Linux for x86_64"

HOW TO EXECUTE REMOTE SCRIPTS 

satellite-installer --scenario capsule --enable-foreman-proxy-plugin-remote-execution-script

# ssh-copy-id -i ~foreman-proxy/.ssh/id_rsa_foreman_proxy.pub user@sknol9.sknlab.com



How to Recover the Base Yum Repository Configuration

Perform this task if the system's base repository configuration has been corrupted or otherwise lost.
Create a temporary repository configuration file in /etc/yum.repos.d.

sudo touch /etc/yum.repos.d/temp_base.repo

Populate the file with entries corresponding to the system's OS version.

For Oracle Linux 9:

[ol9_baseos_latest]
name=Oracle Linux 9 BaseOS Latest ($basearch)
baseurl=https://yum$ociregion.$ocidomain/repo/OracleLinux/OL9/baseos/latest/$basearch/
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
gpgcheck=1
enabled=1

For Oracle Linux 8:

[ol8_baseos_latest]
name=Oracle Linux 8 BaseOS Latest ($basearch)
baseurl=https://yum$ociregion.$ocidomain/repo/OracleLinux/OL8/baseos/latest/$basearch/
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
gpgcheck=1
enabled=1

Reinstall the required release packages of the system's OS version to set up the standard yum repository configurations.

sudo dnf reinstall oraclelinux-release-olrelease-nbr

Verify that the recovery is successful.

ls /etc/yum.repos.d/oraclelinux-release-olrelease-nbr
Remove the temporary configuration file.

rm /etc/yum.repos.d/temp_base.repo

Reinstall other required release packages to obtain the correct repository configurations.

sudo dnf reinstall repository

Enable the repositories that you need.

sudo dnf config-manager --enable repository




SUBSCRIPTION-MANAGER COMMAND CHEAT SHEET 
for Red Hat Enterprise Linux
SUBCOMMAND DESCRIPTIONS AND TASKS


-h or --help Prints the specific help information for the given command subscription-manager attach --help Gives the help information for the attach command

REGISTER A NEW SYSTEM TO THE SUBSCRIPTION-MANAGER 

SUBCOMMAND DESCRIPTIONS AND TASKS


register Registers this system to the Customer Portal or another subscription management service
-------------------------------------------------------
subscription-manager register --username=jo@example.com Gives the username for the account which is registering the system (required if not using activation key)

subscription-manager register --password=MyKul22pwd Gives the user account password (required if not using activation key)

subscription-manager register --serverurl=host.example.com Registers system with host other than subscription.rhn.redhat.com

subscription-manager register --baseurl=https://host:8088/sam Passes the name of the content delivery service to configure the yum service to use to pull down packages

subscription-manager register --name=host.example.com Sets the name of the system to register (defaults to hostname)

subscription-manager register --consumerid=12345d6d-83ed-44f8-99bf-14f605bbeXXX References an existing system inventory ID to re-register a system

subscription-manager register --activationkey=Example_Key1 Gives a comma-separated list of product keys to use to redeem or apply specific subscriptions to the system

subscription-manager register --auto-attach Automatically attaches the best-matched, compatible subscriptions to this system

subscription-manager register --auto-attach --servicelevel=standard Sets the service level (standard, premium, or selfsupport) to use with subscriptions

subscription-manager register --force Registers the system even if it is already registered

subscription-manager register --org=Example_Org1 Assigns the system to an organization

subscription-manager register --environment=Example_Env Registers the system to an environment within an organization

subscription-manager register --proxy=host:3128 --proxyuser=jo --proxypass=MyKul22pwd Registers system via a proxy server

 
UNREGISTER AND REMOVE SUBSCRIPTIONS FROM THE SYSTEM SUBCOMMAND

subscription-manager remove --serial=5282836853581912345 Gives the serial number of the subscription certificate for the specific product to remove from the system

subscription-manager remove --all Removes all of the subscriptions attached to a system

unregister Unregister this system from the Customer Portal or another subscription management service

remove Removes a subscription from the system, but does not uninstall the associated products

clean Removes all local system and subscription data without affecting the server (effectively doing an unregister and remove)

status Shows the current status of the products and attached subscriptions for the system

LIST SUBSCRIPTION AND PRODUCT INFORMATION FOR THE SYSTEM

list Lists subscription and product information for this system 

subscription-manager list --available Lists available subscriptions not yet attached to the system

subscription-manager list --available --all Lists all possible subscriptions that have been purchased, even if they do not match the system architecture

subscription-manager list --available --match-installed Shows only subscriptions matching products that are currently installed

subscription-manager list --available --no-overlap Shows pools which provide products that are not already covered

subscription-manager list --available --ondate=2020-12-25 Sets the date to use to search for active and available subscriptions

subscription-manager list --consumed Lists all subscriptions currently attached to the system

subscription-manager list --installed Lists products (subscribed or not) currently installed on the system.


ATTACH OR AUTO-ATTACH SUBSCRIPTION TO THE SYSTEM SUBCOMMAND

attach Attaches a specified subscription to the registered system 

subscription-manager attach --pool=8af5f9643d4ade76013123451f6e495d Gives the ID for the subscriptions pool (collection of products) to attach to the system (required unless using --auto)

subscription-manager attach --quantity=1 Sets the number of subscriptions attached to the system (default 1)

subscription-manager attach --auto Automatically attaches the best-matched compatible subscriptions to the system

subscription-manager attach --servicelevel=standard Sets the service level (standard, premium, or selfsupport) for subscriptions attached to the system

auto-attach Sets whether the ability to check, attach, and update subscriptions occurs automatically on the system every four hours 

subscription-manager auto-attach --enable Enables the auto-attach option for the system 

subscription-manager auto-attach --show Shows whether auto-attach is enabled on the systems

 subscription-manager auto-attach --disable Disables the auto-attach option for the system


PULL LATEST SUBSCRIPTION DATA FROM SERVER

refresh    Pulls the latest subscription data from the server

import     Imports and applies an externally generated subscription certificate (useful for subscribing disconnected systems)

subscription-manager import --certificate=/path/to/cert.pem Points to a certificate PEM file containing the subscription certificate

identity   Lists the system identity, name, organization name and organization ID

subscription-manager identity  --regenerate Requests that the subscription management service issue a new identity certificate for the system, using an existing UUID in the original identity certificate

subscription-manager identity --regenerate --force Regenerates the identity certificate for the system using username/password authentication

orgs    Lists all the organizations associated with an account

subscription-manager orgs --serverurl=host.sknlab.com Passes the name of the subscription service to use to list all available organizations

plugins  Lists the available subscription-manager plugins

subscription-manager plugins --list Lists the available subscription-manager plugins

subscription-manager plugins --listslots Lists the available plugin slots

subscription-manager plugins --listhooks Lists the available plugin slots and the hooks that handle them

subscription-manager plugins --verbose Shows other plugins data, such as plugin configuration values

redeem   For systems purchased from third-party vendors that include a subscription, the redemption process auto-attaches the pre-selected subscription that the vendor supplied to the system.

subscription-manager redeem --email=admin@sknlabs.com Gives the email account to send the redemption notification message to

subscription-manager redeem --locale=en-us Sets the local language to use for email notification

subscription-manager redeem --org=Example_Org1 Identifies the organization which issued the subscription being redeemed

release   Sets a sticky OS version to use when installing or updating packages

subscription-manager release --list Lists the available OS versions

subscription-manager release --set=6.3 Sets the minor (Y-stream) release version to use (6.3)

subscription-manager release --unset Removes any previously set release version preference

Other Modules

config Changes settings in the rhsm.conf configuration file 

subscription-manager config --list Prints the current configuration for Subscription Manager 

subscription-manager config --remove=server.proxy_port Deletes the current value for the proxy_port without supplying a new parameter (server can be rhsm or rhsmcertd instead) 

subscription-manager config --server.proxy_port=999 Sets proxy_port value in [server] section to 999

environments   Lists all the environments configured for an organization with a locally-hosted subscription or content service, such as Subscription Asset Manager 

subscription-manager environments --username=admin@sknlabs.com Gives the username for the account to use to connect to the organization account. 

subscription-manager environments --password=MyKul22pwd Gives the user account password 

subscription-manager environments --org=Example_Org1 Identifies the organization for which to list the configured environments

facts    Lists the system information, such as release version, number of CPUs, and other architecture information
 
subscription-manager facts --list Lists the system information 

subscription-manager facts --update Updates the system information

repo-override   Manage custom content repository settings 

subscription-manager repo-override --repo=rhel-7-workstation-htb-rpms Selects the repository to modify (can be specified more than once)

subscription-manager repo-override --add=enabled:1 Adds a named override with the provided value, separated by a colon, to repos specified (used with the --repo option) 

subscription-manager repo-override  --remove=enabled Removes a named override from the repos specified with the --repo option 

subscription-manager repo-override --remove-all Removes all overrides from repos specified with the --repo option

subscription-manager repo-override --list Lists all overrides from repos specified with the --repo option
 
repos   Lists all the repositories available to a system (for organizations with a locally-hosted content service, such as Subscription Asset Manager 

subscription-manager repos --list Lists all repositories provided by the content service that the system uses

subscription-manager repos --enable=rhel-7-server-supplementary-rpms Enables the specified repository, made available by the content sources identified in the system subscriptions 

subscription-manager repos --disable=rhel-7-server-supplementary-rpms Disables the specified repository, made available by the content sources identified in the system subscriptions 

service-level    Displays the current configured service level preference (standard, premium, or self-support) for products installed on the system 

subscription-manager service-level --list Lists the available service levels 

subscription-manager service-level --show Shows the system’s current service-level preference

subscription-manager service-level --unset Removes any previously set service level preference 

version   Displays version information about the subscription management service and packages

Registering a system to your local satellite server, example

# rpm -Uvh http://dcplabsat8.local/pub/katello-ca-consumer-latest.noarch.rpm

# subscription-manager register --serverurl=http://dcplabsat8.local


MY Best practice


[root@localhost ~]# subscription-manager release --set=x.y
No releases match 'x.y'.  Consult 'release --list' for a full listing.

Resolution

  • Reinstall subscription-manager package and then perform set release.

  • If same issue is noticed even after re-installing subscription-manager package then set release while registering the system using following commands.

# subscription-manager remove --all 
# subscription-manager unregister 
# subscription-manager clean 
# subscription-manager register --auto-attach --release=x.y 
# subscription-manager release --show

Satellite 6: How to enable and synchronize repositories via command line?


hammer  shell --username <username of satellite admin account> --password  <password> 
hammer> product list --organization-label Default_Organization
hammer> repository-set list --organization-label Default_Organization --product "Red Hat Enterprise Linux Server"
hammer > repository-set available-repositories --organization-label Default_Organization --product "Red Hat Enterprise Linux Server" --id 168
For enable the base repo
repository-set enable --organization-label Default_Organization --product "Red Hat Enterprise Linux Server" --id 168 --releasever 6Server --basearch x86_64

For enable any child repo 
repository-set enable --organization-label Default_Organization --product "Red Hat Enterprise Linux Server" --id 168 --releasever 6.3 --basearch x86_64
hammer> repository synchronize --async --organization-label Default_Organization --product "Red Hat Enterprise Linux Server" --id 1



Create a Local Yum Repository for Oracle Linux 8

Configure Server Repositories


Repository Creation


Resync the Repository


Setup the HTTP Server


Point Servers to the Local Repository

Make sure the repositories of interest are available on the server,

# vim oel8-tmp.repo

[ol8_baseos_latest]

name=Oracle Linux $releasever BaseOS ($basearch)

baseurl=https://yum.oracle.com/repo/OracleLinux/OL8/baseos/latest/$basearch

gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle

gpgcheck=1

enabled=1


Install OEL8 related repo files from below command

# dnf reinstall oraclelinux-release-el8

# dnf clean packages

# dnf install oracle-epel-release-el8.x86_64 oracle-gluster-release-el8.x86_64 oracle-spacewalk-client-release-el8.x86_64 oraclelinux-developer-release-el8.x86_64 oraclelinux-release-el8.x86_64

# dnf install oracle-epel-release-el8.x86_64 oracle-gluster-release-el8.x86_64 oracle-spacewalk-client-release-el8.x86_64 oraclel



IMPORT RPM GPG KEY

# curl https://yum.oracle.com/RPM-GPG-KEY-oracle-ol8 -o RPM-GPG-KEY-oracle

# gpg --quiet --keyid-format 0xlong --with-fingerprint RPM-GPG-KEY-oracle

# yum install yum-utils createrepo


This may install some additional packages to support other repositories, depending on what repositories you already had enabled before you started the process. Let's take a look at the contents of the "/etc/yum.repos.d" directory now.

-rw-r--r--. 1 root root 216 Sep 22 08:49 oel8-tmp.repo

-rw-r--r--. 1 root root 212 Sep 24 12:41 ol8-addon.repo

-rw-r--r--. 1 root root 252 Jul 23 05:50 oracle-epel-ol8.repo

-rw-r--r--. 1 root root 246 Mar 4 2020 oracle-gluster-ol8.repo

-rw-r--r--. 1 root root 459 Dec 13 2019 oraclelinux-developer-ol8.repo

-rw-r--r--. 1 root root 1565 Apr 28 21:05 oracle-linux-ol8.repo

-rw-r--r--. 1 root root 249 Aug 5 00:59 oracle-spacewalk-client-ol8.repo

-rw-r--r--. 1 root root 470 Jul 23 05:50 uek-ol8.repo


Repository Creation

Install the following packages, which include the utilities necessary to set up the repository.

# yum install yum-utils createrepo

Create the following directories to hold the main OS and UEK repositories.

# mkdir -p /u01/repo/OracleLinux

# mkdir -p /u01/repo/logs

# mkdir -p /u01/repo/scripts


If you've done a default installation of Oracle Linux 8, the "ol8_baseos_latest" and "ol8_UEKR6" repositories should already be enabled in the "/etc/yum.repos.d/public-yum-ol8.repo" file, but it's worth checking before you continue.

The reposync command is used to synchronize a remote yum repository to a local directory, using yum to retrieve the packages.

#/usr/bin/reposync --newest-only --repoid=ol8_baseos_latest -p /u01/repo/OracleLinux

# /usr/bin/reposync --newest-only --repoid=ol8_UEKR6 -p /u01/repo/OracleLinux

# /usr/bin/reposync --newest-only --repoid=ol8_appstream -p /u01/repo/OracleLinux

# /usr/bin/reposync --newest-only --repoid=ol8_spacewalk210_client --repoid=ol8_gluster_appstream -p /u01/repo/OracleLinux

# /usr/bin/reposync --newest-only --repoid=ol8_developer -p /u01/repo/OracleLinux

# /usr/bin/reposync --newest-only --repoid=ol8_addons -p /u01/repo/OracleLinux



It takes a long time to sync the repositories the first time, so be patient. I waited overnight for the 32G of downloads to complete. Subsequent refreshes only bring across the changed packages, so they are much quicker. The "newest-only" option reduces the total size of the download.

Once complete, you can create the repositories from the local directories using the createrepo command.

# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_baseos_latest/getPackage

# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_appstream/getPackage

# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_developer/getPackage

# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_gluster_appstream/getPackage

# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_addons/getPackage

# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_developer_EPEL/getPackage

# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_spacewalk210_client/getPackage

# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_UEKR6/getPackage

Resync the Repository

A resync of the Yum repositories involves repeating the reposync and createrepo commands, so you should script them and run them from CRON. Create a script called "/u01/repo/scripts/repo_sync.sh" with the following contents.

#!/bin/bash
# repo_sync.sh - Refresh the local Oracle Linux 8 yum mirrors.
#
# Downloads the latest RPMs from the upstream OL8 repositories with reposync,
# then rebuilds the repo metadata with createrepo. Intended to run daily from
# cron; all output is appended to a dated log file under /u01/repo/logs.

LOG_FILE=/u01/repo/logs/repo_sync_$(date +%Y.%m.%d).log

# Remove logs older than 5 days.
# Note: the previous version had a stray ';' before the redirection, which
# detached '>> $LOG_FILE' from the find command, and used an unquoted glob
# that errors out when no old logs exist. Using -name keeps find happy.
find /u01/repo/logs -name 'repo_sync*' -mtime +5 -delete >> "$LOG_FILE" 2>&1

# One entry per mirrored repository; keeping the list in one place guarantees
# every repo that is synced also gets its metadata rebuilt (the old script
# synced ol8_appstream but never ran createrepo on it, and rebuilt a
# non-existent "ol8_developer4" directory due to a typo).
REPOS=(
  ol8_baseos_latest
  ol8_appstream
  ol8_developer
  ol8_developer_EPEL
  ol8_spacewalk210_client
  ol8_gluster_appstream
  ol8_UEKR6
  ol8_addons
)

# Sync repositories (newest package versions only, to limit download size).
for repo in "${REPOS[@]}"; do
  /usr/bin/reposync --newest-only --repoid="$repo" -p /u01/repo/OracleLinux >> "$LOG_FILE" 2>&1
done

# Rebuild the repodata for each local mirror.
for repo in "${REPOS[@]}"; do
  /usr/bin/createrepo "/u01/repo/OracleLinux/$repo/getPackage/" >> "$LOG_FILE" 2>&1
done




Make the file executable.

# chmod u+x /u01/repo/scripts/repo_sync.sh

Set up a CRON job to run the script on a daily basis. The following entry runs the script each day at 01:00.

0 1 * * * /u01/repo/scripts/repo_sync.sh > /dev/null 2>&1


Setup the HTTP Server

Install the Apache HTTP servers, start it and make sure it restarts automatically on reboot.

# yum install httpd

# systemctl start httpd

# systemctl enable httpd


If you are using the Linux firewall you will need to punch a hole for port 80.

# firewall-cmd --permanent --zone=public --add-port=80/tcp

# firewall-cmd --reload


Either set SELinux to permissive, or configure the fcontext for the repository files as shown below.

# # One-off configuration.

# yum install policycoreutils-python -y

# semanage fcontext -a -t httpd_sys_content_t "/u01/repo/OracleLinux(/.*)?"





# # Run each time the repo contents change.

# restorecon -F -R -v /u01/repo/OracleLinux

Present the repositories using the HTTP server.

# mkdir -p /var/www/html/repo/OracleLinux/ol8_latest

# mkdir -p /var/www/html/repo/OracleLinux/ol8_developer

# mkdir -p /var/www/html/repo/OracleLinux/ol8_developer_EPEL

# mkdir -p /var/www/html/repo/OracleLinux/ol8_spacewalk210_client

# mkdir -p /var/www/html/repo/OracleLinux/ol8_UEKR6

# mkdir -p /var/www/html/repo/OracleLinux/ol8_appstream

# mkdir -p /var/www/html/repo/OracleLinux/ol8_gluster_appstream

# mkdir -p /var/www/html/repo/OracleLinux/ol8_addons



# ln -s /u01/repo/OracleLinux/ol8_baseos_latest/getPackage /var/www/html/repo/OracleLinux/ol8_latest/x86_64

# ln -s /u01/repo/OracleLinux/ol8_developer/getPackage /var/www/html/repo/OracleLinux/ol8_developer/x86_64

# ln -s /u01/repo/OracleLinux/ol8_developer_EPEL/getPackage /var/www/html/repo/OracleLinux/ol8_developer_EPEL/x86_64

# ln -s /u01/repo/OracleLinux/ol8_spacewalk210_client/getPackage /var/www/html/repo/OracleLinux/ol8_spacewalk210_client/x86_64

# ln -s /u01/repo/OracleLinux/ol8_gluster_appstream/getPackage /var/www/html/repo/OracleLinux/ol8_gluster_appstream/x86_64

# ln -s /u01/repo/OracleLinux/ol8_UEKR6/getPackage /var/www/html/repo/OracleLinux/ol8_UEKR6/x86_64

# ln -s /u01/repo/OracleLinux/ol8_appstream/getPackage /var/www/html/repo/OracleLinux/ol8_appstream/x86_64

# ln -s /u01/repo/OracleLinux/ol8_addons/getPackage /var/www/html/repo/OracleLinux/ol8_addons/x86_64



Point Servers to the Local Repository

To allow a server to use the local Yum repositories, create a file called "/etc/yum.repos.d/local-ol8.repo" with the following contents, where "ol8-yum.localdomain" is the name of the server with the Yum repositories.

[local_ol8_baseos_latest]

name=Oracle Linux $releasever Latest ($basearch)

baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_latest/$basearch/

gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8

gpgcheck=1

enabled=1


[local_ol8_developer]

name=Latest Oracle Linux $releasever Development Packages ($basearch)

baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_developer/$basearch/

gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8

gpgcheck=1

enabled=1



[local_ol8_developer_EPEL]

name=Latest Oracle Linux $releasever EPEL Packages for Development ($basearch)

baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_developer_EPEL/$basearch/

gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8

gpgcheck=1

enabled=1


[local_ol8_appstream]

name=Oracle Linux $releasever Application Stream ($basearch)

baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_appstream/$basearch/

gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8

gpgcheck=1

enabled=1



[local_ol8_gluster_appstream]

name= Oracle Linux $releasever Gluster Appstream($basearch)

baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_gluster_appstream/$basearch/

gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8

gpgcheck=1

enabled=1


[local_ol8_spacewalk210_client]

name=Latest Spacewalk Client 2.10 for Oracle Linux 8($basearch)

baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_spacewalk210_client/$basearch/

gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8

gpgcheck=1

enabled=1



[local_ol8_UEKR6]

name=Latest Unbreakable Enterprise Kernel for Oracle Linux $releasever ($basearch)

baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_UEKR6/$basearch/

gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8

gpgcheck=1

enabled=1

[ol8_addons]

name=Oracle Linux $releasever Addons Packages ($basearch)

baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_addons/$basearch/

gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle

gpgcheck=1

enabled=1



You may also want to consider installing the following package, to make sure you pick the fastest mirror, which should be your local one.

# yum install yum-plugin-fastestmirror




GIT

GIT- Configuration:
--------------------------------------------------------------------------------

git config --global user.name   “Your Name” Set the name that will be attached to your commits and tags. 

git config --global user.email   “you@example. com” Set the e-mail address that will be attached to your commits and tags. 

git config --global color.ui auto   Enable some colorization of Git output.


GIT- Starting a project
---------------------------------------------------------------------------------------------------

git init [project name]    Create a new local repository in the current directory. If [project name] is provided, Git will create a new directory named [project name] and will initialize a repository inside it.

 git clone   



GIT-Day-to-day work 
-----------------------------------------------------------------------------------------------------


git status    Displays the status of your working directory. Options include new, staged, and modified files. It will retrieve branch name, current commit identifier, and changes pending commit. 

git add [file]    Add a file to the staging area. Use. in place of the full file path to add all changed files from the current directory down into the directory tree. 

git diff [file]    Show changes between working directory and staging area. 

git diff --staged   [  file]   Shows any changes between the staging area and the repository. 

git checkout -- [file]   Discard changes in working directory. This operation is unrecoverable. 

git reset

git rm [file]    Remove file from working directory and staging area.


GIT -Storing your work
-------------------------------------------------------------------------------------------- 

git stash    Put current changes in your working directory into stash for later use. 

git stash pop    Apply stored stash content into working directory, and clear stash. 

git stash drop   Delete a specific stash from all your previous stashes.



GIT branching model
--------------------------------------------------------------------------------------

git branch [-a]   List all local branches in repository. With -a: show all branches (with remote). 

git branch [branch_name]   Create new branch, referencing the current HEAD. 

git rebase [branch_name]   Apply commits of the current working branch and apply them to the HEAD of [branch] to make the history of your branch more linear. 

git checkout [-b] [branch_name]   Switch working directory to the specified branch. With -b: Git will create the specified branch if it does not exist. 

git merge [branch_name]  Join specified [branch_name] branch into your current branch (the one you are on currently). 

git branch -d [branch_ name]   Remove selected branch, if it is already merged into any other. -D instead of -d forces deletion.

Commit a state of the code base 

Branch a reference to a commit; can have a tracked upstream 

Tag a reference (standard) or an object (annotated) 

HEAD a place where your working directory is now


GIT- Synchronizing repositories
----------------------------------------------------------------------------------------------------

git fetch [remote]   Fetch changes from the remote, but not update tracking branches. 

git fetch --prune [remote]   Delete remote Refs that were removed from the remote repository. 

git pull [remote]    Fetch changes from the remote and merge current branch with its upstream. 

git push [--tags] [remote]   Push local changes to the remote. Use --tags to push tags. 

git push -u [remote] [branch]   Push local branch to remote repository. Set its copy as an upstream.


GIT-Inspect history 
-----------------------------------------------------------------------------------------------------

git log [-n count]   List commit history of current branch. -n count limits list to last n commits. 

git log --oneline --graph --decorate   An overview with reference labels and history graph. One commit per line. 

git log ref..    List commits that are present on the current branch and not merged into ref. A ref can be a branch name or a tag name. 

git log ..ref    List commits that are present on ref and not merged into current branch. 

git reflog   List operations (e.g. checkouts or commits) made on local repository.


GIT-Tagging commits 
--------------------------------------------------------------------------------------------

git tag  List all tags. 

git tag [name] [commit sha]    Create a tag reference named name for current commit. Add commit sha to tag a specific commit instead of current one. 

git tag -a [name] [commit sha]    Create a tag object named name for current commit. 

git tag -d [name]   Remove a tag from local repository


GIT -Reverting changes
--------------------------------------------------------------------------------------------------------

git reset [--hard] [target reference]   Switches the current branch to the target reference, leaving a difference as an uncommitted change. When --hard is used, all changes are discarded. It's easy to lose uncommitted changes with --hard. 

git revert [commit sha]    Create a new commit, reverting changes from the specified commit. It generates an inversion of changes. 


 GIT-Ignoring files
---------------------------------------------------------------------------------------------

cat


Essential Git Commands


Categories are as follows:

  • To create
  • To make local changes
  • To commit history
  • Branches and tags
  • To update and publish
  • To merge and reuse
  • To undo

Let us do define commands that do fall under these categories that are listed below as follows:  

Type 1: CREATE

  1. Clone an existing repository: git clone
  2. Create a new local repository: git init

Type 2: LOCAL CHANGES

  1. Changed files in your working directory: git status
  2. Changes to tracked files: git diff
  3. Add all current changes to the next commit: git add
  4. Add some changes to the next commit: git add -p
  5. Commit all local changes In tracked files: git commit -a
  6. Commit previously staged changes: git commit
  7. Change the last commit: git commit --amend

Type 3: COMMIT HISTORY

  1. Show all commits. starting with newest: git log
  2. Show changes over time for a specific file: git log -p
  3. Who changed what and when in: git blame

Type 4: BRANCHES & TAGS

  1. List all existing branches: git branch -av
  2. Switch HEAD branch: git checkout
  3. Create a new branch based on your current HEAD: git branch
  4. Create a new tracking branch based on a remote branch: git checkout --track
  5. Delete a local branch: git branch -d
  6. Mark the current commit with a tag: git tag

Type 5: UPDATE and PUBLISH

  1. List all currently configured remotes: git remote -v
  2. Show Information about a remote: git remote show
  3. Add new remote repository, named remote: git remote add
  4. Download all changes from but don’t integrate into HEAD: git fetch
  5. Download changes and directly merge/integrate into HEAD: git pull
  6. Publish local changes on a remote: git push
  7. Delete a branch on the remote: git branch -dr
  8. Publish your tags: git push --tags

Type 6: MERGE & REUSE

  1. Merge into your current HEAD: git merge
  2. Rebase your current HEAD onto a branch: git rebase
  3. Abort a rebase: git rebase --abort
  4. Continue a rebase after resolving conflicts: git rebase --continue
  5. Use your configured merge tool to solve conflicts: git mergetool
  6. Use your editor to manually solve conflicts and (after resolving) mark tile as resolved: git add, git rm

Type 7: UNDO

  1. Discard all local changes in your working directory: git reset --hard HEAD
  2. Discard local changes in a specific file: git checkout HEAD
  3. Revert a commit (by producing a new commit with contrary changes): git revert
  4. Reset your HEAD pointer to a previous commit and discard all changes since then: git reset --hard
  5. Preserve all changes as unstaged changes: git reset
  6. Preserve uncommitted local changes: git reset --keep


           The Open SSH

OpenSSH is an implementation of the SSH protocol supported by Linux, UNIX, and similar operating systems. It includes the core files necessary for both the OpenSSH client and server. The OpenSSH suite consists of the following user-space tools:

  • ssh is a remote login program (SSH client).
  • sshd is an OpenSSH SSH daemon.
  • scp is a secure remote file copy program.
  • sftp is a secure file transfer program.
  • ssh-agent is an authentication agent for caching private keys.
  • ssh-add adds private key identities to ssh-agent.
  • ssh-keygen generates, manages, and converts authentication keys for ssh.
  • ssh-copy-id is a script that adds local public keys to the authorized_keys file on a remote SSH server.
  • ssh-keyscan gathers SSH public host keys.

Two versions of SSH currently exist: version 1, and the newer version 2. The OpenSSH suite in RHEL supports only SSH version 2. It has an enhanced key-exchange algorithm that is not vulnerable to exploits known in version 1.

OpenSSH, as one of core cryptographic subsystems of RHEL, uses system-wide crypto policies. This ensures that weak cipher suites and cryptographic algorithms are disabled in the default configuration. To modify the policy, the administrator must either use the update-crypto-policies command to adjust the settings or manually opt out of the system-wide crypto policies.

Restricting access to specific users, groups, or domains

  • The AllowUsers and AllowGroups directives in the /etc/ssh/sshd_config configuration file server enable you to permit only certain users, domains, or groups to connect to your OpenSSH server. You can combine AllowUsers and AllowGroups to restrict access more precisely, for example:

    AllowUsers *@192.168.1.*,*@10.0.0.*,!*@192.168.1.2
    AllowGroups example-group

Linux Log Storage

In Linux, logs are typically stored in the /var/log directory or its subdirectories. The specific location and naming conventions of log files can vary depending on the Linux distribution and the software components installed on the system.

Some of the common log file locations:


  1. /var/log/syslog or /var/log/messages: These files contain general system log messages, including kernel messages and system service status.
  2. /var/log/auth.log or /var/log/secure: These files store authentication-related events, such as login attempts, authentication failures, and user activity logs.
  3. /var/log/apache2/access.log or /var/log/nginx/access.log: These files are specific to web servers like Apache or Nginx and store access logs, including information about incoming HTTP requests and response codes.
  4. /var/log/mysql/error.log or /var/log/postgresql/postgresql-<version>-main.log: These files contain database-specific logs for MySQL or PostgreSQL, respectively. They can provide insights into database activities, errors, queries, and performance-related events.
  5. /var/log/daemon.log or /var/log/systemd.log: These files capture logs from system daemons and services.
  
The following two services handle syslog messages:
  • The systemd-journald daemon
  • The Rsyslog service

Sub-directories storing syslog messages

The following sub-directories under the /var/log directory store syslog messages.

  • /var/log/messages - all syslog messages except the following
  • /var/log/secure - security and authentication-related messages and errors
  • /var/log/maillog - mail server-related messages and errors
  • /var/log/cron - log files related to periodically executed tasks
  • /var/log/boot.log - log files related to system startup

Top Log Files to Monitor in Linux

In Linux, there are several important files and directories that are commonly monitored for various purposes, including system monitoring, security monitoring, and troubleshooting.

Here are some key files and directories that are often monitored:

S.No. | File | Purpose
 1  /var/log/syslog General system-wide event logs
 2 /var/log/kern.log Kernel-specific messages
 3 /var/log/boot.log Logs related to system boot processes
 4 /var/log/auth.log Records authentication-related events, such as user logins and system authentication attempts
 5 /var/log/dpkg.log Logs package installation, removal, and modification actions performed with the APT package manager
 6 /var/log/dmesg Kernel ring buffer logs that contain information about hardware and device drivers
 7 /var/log/iptables.log Logs generated by the iptables firewall tool
 8 /var/log/apache2/access.log Records HTTP requests made to the Apache web server
 9 /var/log/apache2/error.log Contains error messages and warnings from the Apache web server
 10 /var/log/mysql/error.log Logs MySQL database server errors and warnings
 11 /var/log/postgresql/postgresql-X.X-main.log PostgreSQL database server logs (X.X represents the version number)
 12 /var/log/mail.log Logs mail server activity, including sending, receiving, and delivery of emails
 13 /var/log/auth.log Records authentication-related events, including failed login attempts
 14 /var/log/secure Security-related events and authentication logs on some Linux distributions (e.g., CentOS/RHEL)
 15 /var/log/cron.log Logs cron job execution and related information
 16 /var/log/daemon.log Records events and errors related to system daemons

These are just some examples of files and directories that are commonly monitored. The specific files to monitor may vary depending on the Linux distribution, the installed software components, and the monitoring requirements of the system.


YUM



To search for a package, use:

# yum search <package name>

To include term matches within package descriptions, use:


# yum search --all <package name> 

Use the following procedure to list installed and available packages.

  • To list information about all installed and available packages, use:

    # yum list --all
  • To list all packages installed on your system, use:

    # yum list --installed
  • To list all packages in all enabled repositories that are available to install, use:

    # yum list --available

Use the following procedure to list enabled and disabled repositories.


  • To list all enabled repositories on your system, use:

    # yum repolist
  • To list all disabled repositories on your system, use:

    # yum repolist --disabled
  • To list both enabled and disabled repositories, use:

    # yum repolist --all
  • To list additional information about the repositories, use:

    # yum repoinfo
To display information about one or more packages, use:

               # yum info package-name


To ensure glob expressions are passed to yum as intended, use one of the following methods:

  • Double-quote or single-quote the entire glob expression.

                  # yum provides "*/file-name"

    Replace file-name with the name of the file.

  • Escape the wildcard characters by preceding them with a backslash (\) character.

                   # yum provides \*/file-name


          Replace file-name with the name of the file.


Installing software packages

The following section describes how to use yum to:

  • Install packages.
  • Install a package group.
  • Specify a package name in yum input.

7.4.1. Installing packages with YUM

  • To install a package and all the package dependencies, use:

    # yum install package-name

    Replace package-name with the name of the package.

  • To install multiple packages and their dependencies simultaneously, use:

    # yum install package-name-1 package-name-2

    Replace package-name-1 and package-name-2 with the names of the packages.

  • When installing packages on a multilib system (AMD64, Intel 64 machine), you can specify the architecture of the package by appending it to the package name:

    # yum install package-name.arch

    Replace package-name.arch with the name and architecture of the package.

  • If you know the name of the binary you want to install, but not the package name, you can use the path to the binary as an argument:

    # yum install /usr/sbin/binary-file

    Replace /usr/sbin/binary-file with a path to the binary file.

    yum searches through the package lists, finds the package which provides /usr/sbin/binary-file, and prompts you as to whether you want to install it.

  • To install a previously-downloaded package from a local directory, use:

    # yum install /path/

    Replace /path/ with the path to the package.

Note that you can optimize the package search by explicitly defining how to parse the argument. See Section 7.4.3, “Specifying a package name in YUM input” for more details.

 Installing a package group with YUM

The following procedure describes how to install a package group by a group name or by a groupID using yum.

  • To install a package group by a group name, use:

    # yum group install group-name

    Or

    # yum install @group-name

    Replace group-name with the full name of the group or environmental group.

  • To install a package group by the groupID, use:

    # yum group install groupID

    Replace groupID with the ID of the group.


Yum Update
 

       # yum check-update


       # yum update package-name

       # yum group update group-name

       # yum update

       # yum update --security 

      # yum update-minimal --security

Enabling DNF Automatic

To run DNF Automatic, you always need to enable and start a specific systemd timer unit. You can use one of the timer units provided in the dnf-automatic package, or you can write your own timer unit depending on your needs.

The following section describes how to enable DNF Automatic.

Prerequisites

  • You specified the behavior of DNF Automatic by modifying the /etc/dnf/automatic.conf configuration file.

For more information about DNF Automatic configuration file, see Section 2.5.6.2, “DNF Automatic configuration file”.

Procedure

  • Select, enable and start a systemd timer unit that fits your needs:

    # systemctl enable --now <unit>

    where <unit> is one of the following timers:

    • dnf-automatic-download.timer
    • dnf-automatic-install.timer
    • dnf-automatic-notifyonly.timer
    • dnf-automatic.timer

For downloading available updates, use:

# systemctl enable dnf-automatic-download.timer
# systemctl start dnf-automatic-download.timer

For downloading and installing available updates, use:

# systemctl enable dnf-automatic-install.timer
# systemctl start dnf-automatic-install.timer

For reporting about available updates, use:

# systemctl enable dnf-automatic-notifyonly.timer
# systemctl start dnf-automatic-notifyonly.timer

Optionally, you can use:

# systemctl enable dnf-automatic.timer
# systemctl start dnf-automatic.timer

In terms of downloading and applying updates, this timer unit behaves according to settings in the /etc/dnf/automatic.conf configuration file. The default behavior is similar to dnf-automatic-download.timer: it downloads the updated packages, but it does not install them.

 

How to recover redhat kvm  vms root  password

yum -y install libguestfs-tools

root@box1 # guestfish --rw -a ./rhel-guest-image-7.1-20150224.0.x86_64.qcow2
><fs> run
><fs> list-filesystems
><fs> mount /dev/sda1 /
><fs> vi /etc/shadow
 REPLACE ROOT PASSWORD IN /etc/shadow
[root@someothersystem ~]# openssl passwd -1 changeme
$1$QiSwNHrs$uID6S6qOifSNZKzfXsmQG1 
  


Create a local repo with Red Hat Enterprise Linux 8/9
  • Only a RHEL 8 system, Red Hat Satellite, or a Capsule can sync RHEL 8 content correctly.
  • On RHEL8, ensure you have yum-utils-4.0.8-3.el8.noarch or higher installed so reposync correctly downloads all the packages.

Sync all enabled repositories and their repodata

    # reposync -p <download-path> --download-metadata --repo=<repo id>
    # reposync --repoid=rhel-8-for-x86_64-highavailability-rpms --download-path=/ha_soft -n --downloadcomps --download-metadata

To sync a specific minor release

For systems registered to the CDN or Red Hat Satellite you must release lock the system with subscription-manager

# subscription-manager release --set=8.4 && rm -rf /var/cache/dnf

 At this point your system will only have access to content released for RHEL 8.0- 8.4. If you are syncing multiple minor releases, you must keep these separate from each other. For example to sync both 8.4 and 8.5:

# subscription-manager release --set=8.4 && rm -rf /var/cache/dnf
# reposync -p /var/www/html/8.4 --download-metadata --repo=<repo id>
# subscription-manager release --set=8.5 && rm -rf /var/cache/dnf
# reposync -p /var/www/html/8.5 --download-metadata --repo=<repo id>

To sync only the latest content for a specific minor release, you must set the subscription-manager version-lock. Then run reposync with the -n option to specify that you only wish to download the latest content (and not content for older minor release versions as well):

# subscription-manager release --set=8.4 && rm -rf /var/cache/dnf
# reposync -n -p /var/www/html/8.4 --download-metadata --repo=<repo id>
# subscription-manager repos --list
# subscription-manager list --available
# subscription-manager list --consumed

# subscription-manager attach --pool 8a85f9a17f69ca57017faa742b2662ad
# subscription-manager refresh

# subscription-manager repos --enable rhel-8-for-x86_64-highavailability-rpms




Boot From SAN on RedHat with PowerPath and EMC Clarion

Boot From SAN with LVM and Multipath


SEE http://www.thogan.com/site/index.php?option=com_content&view=article&id=5:ubuntu-multipath-boot-from-san-experiment&catid=2:uncatagorized&Itemid=2 for information on our experience with Ubuntu :)

 

Before getting started, you will need to make sure that you have to proper installation materials, and that the SAN configuration is appropriately setup for a system install.

 

Install Media

 

RHEL 4, Update 6 (RHEL 4.6) or RHEL 5.  Earlier versions of RedHat, including earlier update versions, have an improperly functioning QLogic driver.  Use this specific installation media for this document.  Also, depending on the version of the QLogic driver, the SAN devices may be laid out before or after the local storage, use fdisk and look at the volume sizes to identify the local storage and remember which it is.

 

SAN Configuration

 

One path to the SAN.  There cannot be multiple paths to the SAN during an install as it will cause problems with mounting /boot and finding the LVM partitions.  The system must be booted in order to correct the configuration, so you must perform the install with only one path configured.  Once the system boots, the appropriate adjustments can be made to fstab and the LVM to allow the system to boot properly with multiple paths.

 

Location of SAN Boot Card

 

You must know in which PCI slot the HBA that you will be booting off resides.  You will need to configure the BIOS to boot from here.  You must also make sure that this is the card with the active path, and you will need to configure that specific card to have boot enabled.

 

Three things that need to line up:

 

BIOS boot device = HBA w/active path = HBAconfigured to boot

 

BIOS Configuration

 

This section is written based on an installation on IBM x86 hardware.  If you are using another platform these menus may be different.

 

Setting The Boot Device

 

Boot the system and enter the system BIOS.  You will need to make sure that the SAN card is a valid boot device.

 

Select “Start Options”

Go To “PCI Device Boot Priority”

                Modify this field to reflect the PCI slot number in which the boot HBA resides.

Go To “Startup Sequence Options”

                Under “Primary Startup Sequence”, set the four devices as follows:[1]

                “CD ROM”

                “Hard Disk 0”

                “Hard Disk 1”

                “Network”

Escape back to the main menu.

Select “Save Settings” then “Exit Setup”

 

Configuring the HBA

 

The HBA will now need to be configured to be bootable.  On the next boot, enter the HBA BIOS.  This document was written against QLogic 2460 HBAs.  If you are using a different HBA, the process may vary.

 

Enter the BIOS with a <CTRL-Q> when prompted.

Select the adapter with the active path (also should be the slot configured for boot in the BIOS)

Select “Configuration Settings”

Select “Adapter Settings”

                Set “Host Adapter BIOS” to “Enabled”

Return to the previous menu.

Select “Selectable Boot Settings”

                Set “Selectable Boot” to “Enabled”

                Set each boot device by selecting the field, pressing Enter, then selecting a LUN.

Escape back to the main menu, and select “Save Changes” when prompted.

Select “Select Host Adapter”

Select the other adapter this time (the NON boot one)

Repeat the process as with the first adapter, EXCEPT:

                Disable “Host Adapter BIOS”

                Disable “Selectable Boot”

Escape to the main menu and save changes again.

Exit the utility and reboot the system.

 

Starting the Linux Install

 

Have the appropriate RedHat media in the optical drive and boot the system.  Boot to the default graphical install.  Watch when the “Loading SCSI Drivers” screen appears, you should see the module for the HBAs get loaded.  For the QLogic cards, this is qla2xxx or qla2400.

 

Once the graphical installer is fully started and prompting you to click next to begin, switch to the terminal by pressing “CTRL-ALT-F2”.

 

At the console, enter “ls /dev/sd*”.  You should see at least /dev/sda and /dev/sdb.  There may be more.  Identify the SAN and local devices.  The local device will usually be /dev/sda.  You can test this by entering “fdisk /dev/sda”, then at the menu enter “p” to print the partition table.  It will also tell you the size of the volume.  Look for a size that indicated a SAN LUN or local storage and remember which devices are which.

 

Addendum to Standard Linux Build – Partitioning

 

The name of the volume group created on the SAN device should be “sanvg”.  The /boot partition should be created on the SAN device as well.

 

Continue with the install from this point as described in “Standard Linux Build”.

 

First Boot After Install

 

The first boot of the system after installation will likely FAIL.  This is normal, as the installer did not choose the appropriate boot device when installing GRUB.  To boot the system you will need to modify the GRUB commands.

 

After you are informed of the failed boot, hit enter to get the GRUB menu.

 

OH NO!  GRUB comes up and the screen is all wiggedy wack!  Read Appendix A at the end of the document for help!

 

With the first boot option selected, press “e” for edit.

                The first line in the next menu should be something like “root (hd1,0)”.

                Press “e” to edit this line.

                                Change the line to read “root (hd0,0)”

                                Hit enter to accept your changes

                Press “b” to boot the system with the modified commands.

 

Later in this document we will edit grub.conf to permanently make this modification.

 

If you see GRUB in upper left of screen after reboot:

Grub may fail to install to the correct path so it may be necessary to boot up from the DVD/CD in rescue mode using linux rescue at the prompt and then performing a grub install as follows:

    chroot /mnt/sysimage

    grub-install /dev/sdb

 

Install EMC PowerPath

 

The PowerPath software will perform failover functions as well as create special /dev devices allowing unambiguous access to the active path.

 

Fetch the install archive EMCpower.LINUX-5.1.2.00.00-021.tar.gz and extract it.  Then use rpm to install the appropriate package onto the system:

 

Verify EMC PowerPath Install

 

PowerPath should now be installed.  To verify, type “lsmod | grep emc”  You should see a lot of modules with names beginning with emc.  This indicates that PowerPath has loaded successfully.

 

Start PowerPath with its init script.  Afterward you should see it coalesce the available paths to the SAN into a new virtual device.  Verify that this is your SAN device by reading the partition table with fdisk.

 

[root@ ~]# service PowerPath start

Starting PowerPath:  done

[root@ ~]# ls /dev/emcpower*

/dev/emcpower  /dev/emcpowera  /dev/emcpowera1  /dev/emcpowera2

 

As you can see above, there are now devices for /dev/emcpowera, a block device representing the SAN which is backed by /dev/sdb - /dev/sde.

 

[root@ ~]# fdisk /dev/emcpowera

 

The number of cylinders for this disk is set to 9137.

There is nothing wrong with that, but this is larger than 1024,

and could in certain setups cause problems with:

1) software that runs at boot time (e.g., old versions of LILO)

2) booting and partitioning software from other OSs

   (e.g., DOS FDISK, OS/2 FDISK)

 

Command (m for help): p

 

Disk /dev/emcpowera: 75.1 GB, 75161927680 bytes

255 heads, 63 sectors/track, 9137 cylinders

Units = cylinders of 16065 * 512 = 8225280 bytes

 

         Device Boot      Start         End      Blocks   Id  System

/dev/emcpowera1   *           1          19      152586   83  Linux

/dev/emcpowera2              20        9137    73240335   8e  Linux LVM

 

Command (m for help): q

 

A quick run of fdisk above shows that this is definitely our SAN volume.  The boot partition /dev/sdb1 is now available as /dev/emcpowera1.  

 

Modify modprobe.conf

 

At the end of /etc/modprobe.conf add the following line:

 

options scsi_mod max_scsi_luns=256

 

 

Modify grub.conf

 

Open the file and make the following edits:

 

Change any occurrence of “(hd*,0)” to “(hd0,0)”. (Where * is any number that is not 0).

 

On any line that starts with kernel, remove “rhgb quiet” from the end of it.

 

Comment out the “hiddenmenu” option with a “#” at the start of the line.

 

When you are finished, the file should look something like this:

 

# grub.conf generated by anaconda

#

# Note that you do not have to rerun grub after making changes to this file

# NOTICE:  You have a /boot partition.  This means that

#          all kernel and initrd paths are relative to /boot/, eg.

#          root (hd1,0)

#       kernel /vmlinuz-version ro root=/dev/sanvg/rootlv

#          initrd /initrd-version.img

#boot=/dev/sda

default=0

timeout=5

splashimage=(hd0,0)/grub/splash.xpm.gz

#hiddenmenu

title Red Hat Enterprise Linux AS (2.6.9-67.ELsmp)

        root (hd0,0)

        kernel /vmlinuz-2.6.9-67.ELsmp ro root=/dev/sanvg/rootlv

    initrd /initrd-2.6.9-67.ELsmp.img

title Red Hat Enterprise Linux AS-up (2.6.9-67.EL)

        root (hd0,0)

        kernel /vmlinuz-2.6.9-67.EL ro root=/dev/sanvg/rootlv

        initrd /initrd-2.6.9-67.EL.img

 

Modify the LVM Config

 

Finally, you must modify the LVM config file in /etc/lvm/lvm.conf to ignore the raw paths to the SAN and only use the PowerPath devices.

 

Find the line that sets up the default filter:

 

filter = [ "a/.*/" ]

 

Comment it out with a “#” at the start of the line, then put in the following line to tell LVM to only look at the emcpower devices and local storage:

 

filter = [ "a/sda/", "a/emcpower/", "r/.*/" ]

 

This is assuming that /dev/sda is local storage, you may have to modify this line if another device is local storage.

 

To make sure that the filter is working, run “vgscan” and verify that there are no messages about a “Duplicate PV”.

 

[root@mnsvliapp003 ~]# vgscan

  Reading all physical volumes.  This may take a while...

  Found volume group "sanvg" using metadata type lvm2

 

Setting Failover Policy

 

The appropriate failover policy will need to be set depending on the type of SAN.  Up to this point, only one path to each service processor should show as “active”, the rest show a state of “unlic”.  Running “powermt display dev=all” will show this information:

 

If the PowerPath license has not been installed do so with:

    emcpreg --install

 

[root@~]# powermt display dev=all

Pseudo name=emcpowera

CLARiiON ID=APM00064800054 [prod_jboss1]

Logical device ID=60060160A9D01A00A2AD9882F5ACDC11 [prod_jboss1_lun20]

state=alive; policy=BasicFailover; priority=0; queued-IOs=0

Owner: default=SP A, current=SP A

==============================================================================

---------------- Host ---------------   - Stor -   -- I/O Path -  -- Stats ---

### HW Path                 I/O Paths    Interf.   Mode    State  Q-IOs Errors

==============================================================================

   1 qla2xxx                   sdb       SP A4     active  alive      0      0

   1 qla2xxx                   sdc       SP B5     active  alive   0      0

   2 qla2xxx                   sdd       SP A5     unlic   alive      0      0

   2 qla2xxx                   sde       SP B4     unlic   alive      0      0

 

For a CLARiiON array, issue the following command to set the failover policy to “CLARiiON Optimal”.  this will cause all other paths to become active.  You will then need to save the configuration, and it will then persist across reboots.

 

[root@ ~]# powermt set policy=co

[root@ ~]# powermt display dev=all

Pseudo name=emcpowera

CLARiiON ID=APM00064403323 [dr_epicdb]

Logical device ID=600601602E811900C8E4B43C79AADC11 [dr_epicdb_LUN_100]

state=alive; policy=CLAROpt; priority=0; queued-IOs=0

Owner: default=SP A, current=SP A

==============================================================================

---------------- Host ---------------   - Stor -   -- I/O Path -  -- Stats ---

### HW Path                 I/O Paths    Interf.   Mode    State  Q-IOs Errors

==============================================================================

   1 qla2xxx                   sdb       SP B4     active  alive      0      0

   1 qla2xxx                   sdc       SP A5     active  alive      0      0

   2 qla2xxx                   sdd       SP B4     active  alive      0      0

   2 qla2xxx                   sde       SP A5     active  alive      0      0

 

Error displaying HBAs and associated devices.

 

[root@ ~]# powermt save

 

CABLE PULL TEST

 

At this point in the document, the configuration should be correct to survive a cable pull test.  If the system cannot recover from the I/O errors after a cable pull at this point, something is wrong with the configuration.  Review all steps and ensure that the output from the diagnostic commands is consistent with what is documented here.

 

Finishing Up

 

The system should be configured to boot and handle multiple paths now.  Have the extra paths configured on the SAN then reboot the system.

 

During the system startup, PowerPath may report failure to start.  This is fine, all that failed was the module load, which is because the modules were already loaded in the initrd.

 

Checking the PowerPath Configuration

 

PowerPath should now see all the active paths to the storage.  To verify this, run the command “powermt display dev=all”.  This should return the expected number of paths and show what raw devices are backing each path.

 

[root@ ~]# powermt display dev=all

Pseudo name=emcpowera

CLARiiON ID=APM00064403323 [dr_epicdb]

Logical device ID=600601602E811900C8E4B43C79AADC11 [dr_epicdb_LUN_100]

state=alive; policy=CLAROpt; priority=0; queued-IOs=0

Owner: default=SP A, current=SP A

==============================================================================

---------------- Host ---------------   - Stor -   -- I/O Path -  -- Stats ---

### HW Path                 I/O Paths    Interf.   Mode    State  Q-IOs Errors

==============================================================================

   1 qla2xxx                   sdb       SP B4     active  alive      0      0

   1 qla2xxx                sdc       SP A5     active  alive      0      0

   2 qla2xxx                   sdd       SP B4     active  alive      0      0

   2 qla2xxx                   sde       SP A5     active  alive      0      0

 

Error displaying HBAs and associated devices.

 



[1]               Many BIOSes have an option for “PCI” or “Additional Boot Devices”, or even names the HBA.  If this is the case on the target system, use that selection instead of “Hard Disk”.  On the IBM hardware the PCI boot device magically becomes Hard Disk 0 or 1 in the boot order, so make sure they are both in there.  Boot from SAN may fail if there are bootable partitions on ANY local storage device.

 

To upgrade the Kernel:


Move /etc/init.d/PowerPath to /root.


Comment out references to PowerPath pseudo (emcpower?) devices from system configuration files such as /etc/fstab and /etc/lvm/lvm.conf.


Reboot the machine.


Stop the Navisphere agent (CLARiiON only)

# /etc/init.d/naviagent stop

 

Stop the ECC Master Agent (Symmetrix only)

# /etc/init.d/eccmad stop

Kill any remaining "mlragent" processes.


Uninstall the EMCpower.LINUX rpm package

# rpm -e EMCpower.LINUX


Upgrade the kernel.


Reboot the machine.


Stop the Navisphere agent (CLARiiON only)

# /etc/init.d/naviagent stop

 

Stop the ECC Master Agent (Symmetrix only)

# /etc/init.d/eccmad stop

Kill any remaining "mlragent" processes.

 

Uncomment references to PowerPath pseudo devices from system configuration files such as /etc/fstab and /etc/lvm/lvm.conf.


Reboot the machine. 


LVM Cheat Sheet

 

Example:

Add a new disk with VMware or physical

 

Resize a VMWare PV

vmkfstools -X 125G /vmfs/volumes/LUNNAME/servername/servername.vmdk 

pvchange -x n /dev/sdb

pvresize /dev/sdb

pvchange -x y /dev/sdb

 

 

echo "scsi scan-new-devices" > /proc/scsi/scsi  to recognize the new disk or reboot the server

For single devices use: 

echo "scsi add-single-device 0 0 1 0" > /proc/scsi/scsi

echo "scsi add-single-device 0 0 2 0" > /proc/scsi/scsi

....

echo "scsi add-single-device 0 0 4 0" > /proc/scsi/scsi

echo "scsi add-single-device 0 0 5 0" > /proc/scsi/scsi

echo "scsi add-single-device 0 0 6 0" > /proc/scsi/scsi

 

After creating a new 50 GB partition (/dev/sdb1) with fdisk run the below commands:

# partprobe

# pvcreate /dev/sdb1

# vgcreate vg1 /dev/sdb1

# lvcreate -L 49G -n lv1 vg1

lvcreate -l 100%FREE -n  lv1 vg1

# mke2fs -j /dev/vg1/lv1

 

To remove:

pvremove -ff /dev/sdaX

 

Add disk example:

fdisk /dev/sdc and /dev/sdd

end up with /dev/sdc1 and /dev/sdd1

partprobe

pvcreate /dev/sdc1

pvcreate /dev/sdd1

vgextend vg1 /dev/sdc1 /dev/sdd1

lvextend -L +98G /dev/vg1/lv1

ext2online /dev/vg1/lv1

 

    or unmounted disk method:

    umount /usr/local/cvsroot

    e2fsck -f /dev/vg1/lv1

    resize2fs /dev/vg1/lv1

    reboot or remount the drive manually

 

 

Combining Disks:

To create new physical volume

pvcreate /dev/sda

 

Create a volume group from physical disks sda and sdb

vgcreate StorageVG /dev/sda /dev/sdb

 

Extend an existing volume group with additional physical disks

vgextend StorageVG /dev/sda /dev/sdb

 

Create or mod a logical volume

lvcreate -n MyStorageLV --size 10000 StorageVG

creates a 10GB logical volume called MyStorageLV on  the Volume Group StorageVG

lvcreate -l 100%FREE -n  MyStorageLV StorageVG


 

Create an ext3 filesystem on the new LV

mke2fs -j /dev/StorageVG/MyStorageLV

to make swap use mkswap instead

 

create a mount point i.e. mkdir /mnt/mystorage

then mount with:

mount -t ext3 /dev/StorageVG/MyStorageLV /mnt/mystorage

 

add the new mount to  /etc/fstab by adding line:

/dev/StorageVG/MyStorageLV /mnt/mystorage

 

To extend the logical volume, expand VG if required and then use:

lvextend --size <size> /dev/StorageVG/MyStorageLV

 

Finally expand the filesystem to utilize new disk space with:

resize2fs /dev/StorageVG/MyStorageLV 

 

Real example of extending /var online

added disk with VMWare edit settings where disk 0 and 1 already exist

echo "scsi add-single-device 0 0 2 0" > /proc/scsi/scsi

fdisk /dev/sdc  choose n  p 1 then defaults

partprobe

pvcreate /dev/sdc1

vgextend LogVolGrp00 /dev/sdc1

lvextend -L +7936M  /dev/LogVolGrp00/LvVar

ext2online /dev/LogVolGrp00/LvVar

 

Multipath device  using LVM example

Move /etc/multipath.conf to /tmp: if you're lucky like me your devices are already supported and this will unblacklist your new entries, otherwise you will need to go and see my other multipath doc, vendor docs for your devices, and redhat docs to get everything working


Edit /etc/lvm/lvm.conf and replace filter with:

filter = [ "a/dev/mapper/.*/", "r/dev/sd.*/", "r/dev/cciss/.*/" ]

# Will prevent lvm from spitting out a bunch of errors about the underlying devices that we don't need anyhow


Do following to add all four paths for Lun1 to the server:

echo "scsi add-single-device 0 0 0 1" > /proc/scsi/scsi

echo "scsi add-single-device 1 0 0 1" > /proc/scsi/scsi

echo "scsi add-single-device 0 0 1 1" > /proc/scsi/scsi

echo "scsi add-single-device 1 0 1 1" > /proc/scsi/scsi


multipath -ll # to get the device id for following commands (multipath.conf must be gone or configured correctly for this to work)


pvcreate /dev/mapper/3600601601781190068d807e1f3cade11 # create physical volume for use with lvm

vgcreate appsvg /dev/mapper/3600601601781190068d807e1f3cade11 # create volume group


lvcreate -L +19G -n appslv appsvg  # Create 19G logical volume 

lvcreate -l 100%FREE -n  lv1 vg1


mke2fs -j /dev/mapper/appsvg-appslv  # formats filesystem as ext3


e2label /dev/mapper/appsvg-appslv apps

#Now we can use the apps label to mount this guy


edit rc.sysinit with:

# Build label-based symlinks for multipath devices under /dev/mpath_links so
# fstab can mount by label regardless of which /dev/mapper name comes up.
echo "TIME TO PUT SOME MPATH LINKING  HERE!"

    ORIGINAL_IFS=$IFS

    # Split only on newline/backspace so each "device label" pair stays one word
    # in the for-loop below ($'\n\b' replaces the old `echo -en '\n\b'` fork).
    IFS=$'\n\b'

    # One line per labelled mapper device: "<device> <label>".
    # "grep -v Bad" drops devices e2label could not read (no/invalid superblock).
    MPATH_LABELS=$(for d in /dev/mapper/* ; do echo -n "$d " ; e2label "$d" 2>&1 ; done | grep mapper | grep -v Bad)

    # -p: don't error when the directory already exists on subsequent boots.
    mkdir -p /dev/mpath_links 2>/dev/null

    # Stale links from a previous boot may point at devices that moved.
    rm -f /dev/mpath_links/*

    for mpath in $MPATH_LABELS; do

        mpath_device=$(echo "$mpath" | awk '{print $1}')

        mpath_label=$(echo "$mpath" | awk '{print $2}')

        mpath_link="/dev/mpath_links/$mpath_label"

        echo "Linking $mpath_device -> $mpath_link"

        # -f: replace an existing link instead of failing on re-run.
        ln -sf "$mpath_device" "$mpath_link"

    done

    IFS=$ORIGINAL_IFS

    echo "DONE WITH CUSTOM MPATH LINKING"


immediately after:

if strstr "$cmdline" noreadonlyroot ; then

READONLY=no

fi


and edit fstab with:

/dev/mpath_links/apps      /apps                   ext3    defaults        0 0



Script to create VMSS and LB in Azure 
#!/bin/bash
# Create a VNet/subnet, a public IP, a load balancer (inbound NAT rules for SSH,
# an HTTP health probe, two LB rules), two NICs attached to the LB, and two VMs
# in an availability set.
set -euo pipefail

az network vnet create -g MyRG -n VNET1
az network vnet subnet create -g MyRG --vnet VNET1 -n S1
az network public-ip create -g MyRG -n IP1 --allocation-method static --dns-name MyDNS
az network lb create -g MyRG -n LB1 --frontend-ip-name LBFE --backend-pool-name LBBE

# Inbound NAT: SSH to each backend VM via a distinct front-end port.
# (Fixed: the flag is --protocol, not "--protocal" — the CLI rejects the latter.)
az network lb inbound-nat-rule create -g MyRG -n SSH1 --lb-name LB1 --backend-port 22 \
     --frontend-port 21 --frontend-ip-name LBFE --protocol tcp
az network lb inbound-nat-rule create -g MyRG -n SSH2 --lb-name LB1 --backend-port 22 \
     --frontend-port 23 --frontend-ip-name LBFE --protocol tcp

# Health probe and load-balancing rules.
# (Fixed: the load balancer created above is LB1; the rules referenced "MyLB".)
az network lb probe create -g MyRG -n HTTPPROBE --lb-name LB1 --port 80 --protocol tcp
az network lb rule create -g MyRG -n HTTP1 --lb-name LB1 --probe-name HTTPPROBE --protocol tcp \
     --frontend-ip-name LBFE  --frontend-port 80 --backend-pool-name LBBE --backend-port 80
az network lb rule create -g MyRG -n HTTP2 --lb-name LB1 --probe-name HTTPPROBE --protocol tcp \
     --frontend-ip-name LBFE  --frontend-port 1234 --backend-pool-name LBBE --backend-port 8000

az network lb show -g MyRG -n LB1

# NICs joined to the LB backend pool and to their own NAT rules.
# (Fixed: these flags take full ARM resource IDs, not bare names, and NIC2 must
# use SSH2 — both NICs previously claimed NAT rule ssh1.)
az network nic create -g MyRG -n NIC1 --subnet-name S1 --vnet-name VNET1 --lb-address-pool-ids '/subscriptions/[...]/resourceGroups/MyRG/providers/Microsoft.Network/loadBalancers/LB1/backendAddressPools/LBBE' --lb-nat-rule-ids '/subscriptions/[...]/resourceGroups/MyRG/providers/Microsoft.Network/loadBalancers/LB1/inboundNatRules/SSH1'
az network nic create -g MyRG -n NIC2 --subnet-name S1 --vnet-name VNET1 --lb-address-pool-ids '/subscriptions/[...]/resourceGroups/MyRG/providers/Microsoft.Network/loadBalancers/LB1/backendAddressPools/LBBE' --lb-nat-rule-ids '/subscriptions/[...]/resourceGroups/MyRG/providers/Microsoft.Network/loadBalancers/LB1/inboundNatRules/SSH2'

az vm availability-set create -g MyRG -n AS1
az vm create -g MyRG -n VM1 --availability-set AS1 --vnet VNET1 --subnet S1 --nic NIC1
az vm create -g MyRG -n VM2 --availability-set AS1 --vnet VNET1 --subnet S1 --nic NIC2































Comments

Popular posts from this blog

RHEL - How to back out a failed patch

Local Yum Repository for Oracle Linux 8