Vathsa's- Linux - SysOps and DevOps
AZURE SAMPLE WORK-1: Cluster 1
# Build a load balancer that fronts two websites (Contoso, Fabrikam) hosted
# on two VMs in an availability set. Requires $RgName and $Location to be set.
# Create a resource group.
az group create --name "$RgName" --location "$Location"
# Create an availability set for the two VMs that host both websites.
az vm availability-set create --resource-group "$RgName" --location "$Location" --name MyAvailabilitySet \
  --platform-fault-domain-count 2 --platform-update-domain-count 2
# Create a virtual network and a subnet.
az network vnet create --resource-group "$RgName" --name MyVnet --address-prefix 10.0.0.0/16 --location "$Location" \
  --subnet-name MySubnet --subnet-prefix 10.0.0.0/24
# Create three public IP addresses; one for the load balancer and two for the front-end IP configurations.
az network public-ip create --resource-group "$RgName" --name MyPublicIp-LoadBalancer \
  --allocation-method Dynamic
az network public-ip create --resource-group "$RgName" --name MyPublicIp-Contoso --allocation-method Dynamic
az network public-ip create --resource-group "$RgName" --name MyPublicIp-Fabrikam --allocation-method Dynamic
# Create a load balancer.
az network lb create --resource-group "$RgName" --location "$Location" --name MyLoadBalancer \
  --frontend-ip-name FrontEnd --backend-pool-name BackEnd --public-ip-address MyPublicIp-LoadBalancer
# Create two front-end IP configurations for both web sites.
az network lb frontend-ip create --resource-group "$RgName" --lb-name MyLoadBalancer --public-ip-address MyPublicIp-Contoso --name FeContoso
az network lb frontend-ip create --resource-group "$RgName" --lb-name MyLoadBalancer --public-ip-address MyPublicIp-Fabrikam --name FeFabrikam
# Create the back-end address pools.
az network lb address-pool create --resource-group "$RgName" --lb-name MyLoadBalancer --name BeContoso
az network lb address-pool create --resource-group "$RgName" --lb-name MyLoadBalancer --name BeFabrikam
# Create an HTTP health probe on port 80 (--path is required for Http probes).
az network lb probe create --resource-group "$RgName" --lb-name MyLoadBalancer --name MyProbe \
  --protocol Http --port 80 --path /
# Create the load balancing rules.
# NOTE(review): the probe checks HTTP on port 80 while the rules forward
# port 5000 — confirm the sites actually listen on 5000.
az network lb rule create --resource-group "$RgName" --lb-name MyLoadBalancer --name LBRuleContoso --protocol Tcp --probe-name MyProbe --frontend-port 5000 --backend-port 5000 \
  --frontend-ip-name FeContoso --backend-pool-name BeContoso
az network lb rule create --resource-group "$RgName" --lb-name MyLoadBalancer --name LBRuleFabrikam --protocol Tcp --probe-name MyProbe --frontend-port 5000 --backend-port 5000 \
  --frontend-ip-name FeFabrikam --backend-pool-name BeFabrikam
# ############## VM1 ###############
# Create a public IP for the first VM.
az network public-ip create --resource-group $RgName --name MyPublicIp-Vm1 --allocation-method Dynamic
# Create a network interface for VM1.
az network nic create --resource-group $RgName --vnet-name MyVnet --subnet MySubnet --name MyNic-Vm1 --public-ip-address MyPublicIp-Vm1
# Add two extra IP configurations so this NIC sits in BOTH back-end pools
# (BeContoso and BeFabrikam) of the load balancer.
az network nic ip-config create --resource-group $RgName --name ipconfig2 --nic-name MyNic-Vm1 --lb-name MyLoadBalancer --lb-address-pools BeContoso
az network nic ip-config create --resource-group $RgName --name ipconfig3 --nic-name MyNic-Vm1 --lb-name MyLoadBalancer --lb-address-pools BeFabrikam
# Create VM1 in the availability set, attached to the NIC built above.
az vm create --resource-group $RgName --name MyVm1 --nics MyNic-Vm1 \
--image UbuntuLTS --availability-set MyAvailabilitySet --admin-username azureadmin --generate-ssh-keys
############### VM2 ###############
# Create a public IP for the second VM.
az network public-ip create --resource-group $RgName --name MyPublicIp-Vm2 --allocation-method Dynamic
# Create a network interface for VM2.
az network nic create --resource-group $RgName --vnet-name MyVnet --subnet MySubnet --name MyNic-Vm2 --public-ip-address MyPublicIp-Vm2
# Add IP configurations so VM2's NIC also joins both back-end pools.
az network nic ip-config create --resource-group $RgName --name ipconfig2 --nic-name MyNic-Vm2 --lb-name MyLoadBalancer --lb-address-pools BeContoso
az network nic ip-config create --resource-group $RgName --name ipconfig3 --nic-name MyNic-Vm2 --lb-name MyLoadBalancer --lb-address-pools BeFabrikam
# Create VM2 in the same availability set.
az vm create --resource-group $RgName --name MyVm2 --nics MyNic-Vm2 --image UbuntuLTS --availability-set MyAvailabilitySet --admin-username azureadmin --generate-ssh-keys
Azure virtual network
- create, change, or delete
-------------------------------------------------------------------------------------------------
az network vnet create -g MyRG -n VNET1
az network vnet subnet create -g MyRG --vnet VNET1 -n S1
az network public-ip create -g MyRG -n IP1 --allocation-method static --dns-name MyDNS
az network lb create -g MyRG -n LB1 --frontend-ip-name LBFE --backend-pool-name LBBE
az network lb inbound-nat-rule create -g MyRG -n SSH1 --lb-name LB1 --backend-port 22 \
  --frontend-port 21 --frontend-ip-name LBFE --protocol tcp
az network lb inbound-nat-rule create -g MyRG -n SSH2 --lb-name LB1 --backend-port 22 \
  --frontend-port 23 --frontend-ip-name LBFE --protocol tcp
az network lb probe create -g MyRG -n HTTPPROBE --lb-name LB1 --port 80 --protocol tcp
az network lb rule create -g MyRG -n HTTP1 --lb-name LB1 --probe-name HTTPPROBE --protocol tcp \
  --frontend-ip-name LBFE --frontend-port 80 \
  --backend-pool-name LBBE --backend-port 80
az network lb rule create -g MyRG -n HTTP2 --lb-name LB1 --probe-name HTTPPROBE --protocol tcp \
  --frontend-ip-name LBFE --frontend-port 1234 \
  --backend-pool-name LBBE --backend-port 8000
az network lb show -g MyRG -n LB1
az network nic create -g MyRG -n NIC1 --subnet-name S1 --vnet-name VNET1 \
  --lb-address-pool-ids '/subscriptions/[...]/resourceGroups/MyRG/providers/Microsoft.Network/loadBalancers/LB1/backendAddressPools/LBBE' \
  --lb-nat-rule-ids '/subscriptions/[...]/resourceGroups/MyRG/providers/Microsoft.Network/loadBalancers/LB1/inboundNatRules/ssh1'
az network nic create -g MyRG -n NIC2 --subnet-name S1 --vnet-name VNET1 \
  --lb-address-pool-ids 'LBBE' \
  --lb-nat-rule-ids 'ssh1'
az vm availability-set create -g MyRG -n AS1
az vm create -g MyRG -n VM1 --availability-set AS1 --vnet VNET1 --subnet S1 --nic NIC1
az vm create -g MyRG -n VM2 --availability-set AS1 --vnet VNET1 --subnet S1 --nic NIC2
Docker Commands and Best Practices
-----------------------------------------------------------------------------
Before we get into the best practices for using Docker, here’s a quick overview of the vocabulary you should know:
- Layer: a set of read-only files or commands that describe how to set up the underlying system beneath the container.
- Layers are built on top of each other, and each one represents a change to the filesystem.
- Image: an immutable layer that forms the base of the container.
- Container: an instance of the image that can be executed as an independent application.
- The container has a mutable layer that lies on top of the image and that is separate from the underlying layers.
- Registry: a storage and content delivery system used for distributing Docker images.
- Repository: a collection of related Docker images, often different versions of the same application.
Developing with Docker Containers:
- docker create [image]: Create a new container from a particular image.
- docker login: Log into the Docker Hub repository.
- docker pull [image]: Pull an image from the Docker Hub repository.
- docker push [username/image]: Push an image to the Docker Hub repository.
- docker search [term]: Search the Docker Hub repository for a particular term.
- docker tag [source] [target]: Create a target tag or alias that refers to a source image.
Running Docker Containers
- docker start [container]: Start a particular container.
- docker stop [container]: Stop a particular container.
- docker exec -ti [container] [command]: Run a shell command inside a particular container.
- docker run -ti [image] [command]:
- Create and start a container at the same time, and then run a command inside it.
- docker run -ti --rm [image] [command]:
- Create and start a container at the same time, run a command inside it,
- and then remove the container after executing the command.
- docker pause [container]: Pause all processes running within a particular container.
Using Docker Utilities:
- docker history [image]: Display the history of a particular image.
- docker images: List all of the images that are currently stored on the system.
- docker inspect [object]: Display low-level information about a particular Docker object.
- docker ps: List all of the containers that are currently running.
- docker version: Display the version of Docker that is currently installed on the system.
Cleaning Up Your Docker Environment:
- docker kill [container]: Kill a particular container.
- docker kill $(docker ps -q): Kill all containers that are currently running.
- docker rm [container]: Delete a particular container that is not currently running.
- docker rm $(docker ps -a -q): Delete all containers that are not currently running.
Hopefully this guide will serve as your go-to Docker cheat sheet. If there is anything I missed, please let me know and I will happily add it.
Docker: All the Most Essential Commands in One Place
Linux Mysql Command Help and Examples
Linux Mysql Command Help and Examples
Installation of MySQL and Configuring SSL
chown -R mysql:mysql /etc/mysql/newcerts
openssl genrsa 2048 > ca-key.pem
openssl x509 -req -in server-req.pem -days 1000 -CA ca-cert.pem -CAkey ca-key.pem -set_serial 01 > server-cert.pem
The Common Name used here must differ from the one used for the Certificate Authority and the Server certificate above.
openssl x509 -req -in client-req.pem -days 1000 -CA ca-cert.pem -CAkey ca-key.pem -set_serial 01 > client-cert.pem
ssl-ca=/etc/mysql/newcerts/ca-cert.pem
ssl-cert=/etc/mysql/newcerts/server-cert.pem
ssl-key=/etc/mysql/newcerts/server-key.pem
mysql> GRANT ALL ON *.* TO 'root'@'%' IDENTIFIED BY 'mysql' REQUIRE SSL;
mysql> quit
mysql --ssl-ca=ca-cert.pem --ssl-key=client-key.pem --ssl-cert=client-cert.pem -u root -p -v -v -v
Enter password: <password>
————–
SHOW STATUS LIKE 'Ssl_cipher';
————–
| Variable_name | Value |
+—————+——————–+
| Ssl_cipher | DHE-RSA-AES256-SHA |
+—————+——————–+
1 row in set (0.00 sec)
————–
show variables like '%ssl%';
————–
| Variable_name | Value |
+—————+————————————-+
| have_openssl | YES |
| have_ssl | YES |
| ssl_ca | /etc/mysql/newcerts/ca-cert.pem |
| ssl_capath | |
| ssl_cert | /etc/mysql/newcerts/server-cert.pem |
| ssl_cipher | |
| ssl_key | /etc/mysql/newcerts/server-key.pem |
+—————+————————————-+
7 rows in set (0.01 sec)
mysql> GRANT ALL ON <db-name>.* TO <username>@'<IP Address>' IDENTIFIED BY 'PASSWORD';
# service iptables save
Stopping MySQL: [ OK ]
mysql> UPDATE user SET Password=PASSWORD('new_password') WHERE user='root';
mysql> FLUSH PRIVILEGES;
mysql> exit;
# service mysqld start
--------------------------------------------------------------
Linux (Redhat, Centos) iptables and firewalld Commands
#!/bin/bash
# Open the application ports used on this host in the firewalld 'public'
# zone. All rules are added with --permanent; run 'firewall-cmd --reload'
# (done at the end) to make them take effect.
## HTTP
sudo firewall-cmd --zone=public --add-port=80/tcp --permanent
## SSH
sudo firewall-cmd --zone=public --add-port=22/tcp --permanent
## HTTPS
sudo firewall-cmd --zone=public --add-port=443/tcp --permanent
## UD Scanner / Ansible / HP SiteScope
sudo firewall-cmd --zone=public --add-port=8443/tcp --permanent
## JDBC
sudo firewall-cmd --zone=public --add-port=8888/tcp --permanent
## Ansible (WinRM over HTTPS)
sudo firewall-cmd --zone=public --add-port=5986/tcp --permanent
## MySQL
sudo firewall-cmd --zone=public --add-port=3306/tcp --permanent
## McAfee agent
sudo firewall-cmd --zone=public --add-port=57398/tcp --permanent
## syslog-ng
sudo firewall-cmd --zone=public --add-port=3164/tcp --permanent
# NOTE(review): port 314 looks like a typo — standard syslog is 514 (and the
# line above uses 3164, the RFC number, not a port). Confirm both values.
sudo firewall-cmd --zone=public --add-port=314/tcp --permanent
## NTP (udp)
sudo firewall-cmd --zone=public --add-port=123/udp --permanent
## OpenVPN
sudo firewall-cmd --zone=public --add-port=1194/tcp --permanent
## chronyd
sudo firewall-cmd --zone=public --add-port=323/udp --permanent
## Reload the firewall so the permanent rules become active, then verify.
sudo firewall-cmd --reload
sudo firewall-cmd --list-all
# Rich rules: restrict selected ports to a specific /24 source network.
sudo firewall-cmd --permanent --zone=public --add-rich-rule=' rule family="ipv4" source address="xxx.xxx.xxx.xxx/24" port protocol="tcp" port="22" accept'
sudo firewall-cmd --permanent --zone=public --add-rich-rule=' rule family="ipv4" source address="xxx.xxx.xxx.xxx/24" port protocol="udp" port="123" accept'
sudo firewall-cmd --permanent --zone=public --add-rich-rule=' rule family="ipv4" source address="xxx.xxx.xxx.xxx/24" port protocol="tcp" port="80" accept'
sudo firewall-cmd --permanent --zone=public --add-rich-rule=' rule family="ipv4" source address="xxx.xxx.xxx.xxx/24" port protocol="tcp" port="443" accept'
sudo firewall-cmd --permanent --zone=public --add-rich-rule=' rule family="ipv4" source address="xxx.xxx.xxx.xxx/24" port protocol="tcp" port="8443" accept'
#!/bin/bash
## Default firewall rules applied on a VM.
## Version 1.0 ##
## Author: SKN ##
# Start firewalld now and enable it at boot.
sudo systemctl start firewalld
sudo systemctl enable firewalld
# Verify the daemon is up.
sudo firewall-cmd --state
sudo firewall-cmd --reload
sudo systemctl status firewalld
# Inspect zones: all defined, the default, and the currently active ones.
sudo firewall-cmd --get-zones
sudo firewall-cmd --get-default-zone
sudo firewall-cmd --get-active-zones
sudo firewall-cmd --list-all
sudo firewall-cmd --list-services --zone=public
# Permanently allow the baseline services in the public zone.
sudo firewall-cmd --zone=public --add-service=http --permanent
sudo firewall-cmd --zone=public --add-service=https --permanent
sudo firewall-cmd --zone=public --add-service=ssh --permanent
sudo firewall-cmd --zone=public --add-service=chrony --permanent
sudo firewall-cmd --zone=public --add-service=crond --permanent
sudo firewall-cmd --zone=public --add-service=syslog --permanent
# Reload so the permanent rules take effect.
sudo firewall-cmd --reload
## Print the services allowed in every zone.
for z in $(sudo firewall-cmd --get-zones); do
  echo "Services allowed in $z zone: $(sudo firewall-cmd --list-services --zone="$z")"
done
# Reset iptables and apply a minimal default-deny ruleset.
IPT='iptables'
"$IPT" -F
# Add the accept rules for loopback, established sessions and SSH *before*
# switching the INPUT policy to DROP — flipping the policy first would cut
# off a remote SSH session in the window before the accepts are in place.
# Accept on localhost.
"$IPT" -A INPUT -i lo -j ACCEPT
"$IPT" -A OUTPUT -o lo -j ACCEPT
# Allow established sessions to receive traffic, and inbound SSH.
"$IPT" -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
"$IPT" -A INPUT -p tcp --dport 22 -j ACCEPT
# Default-deny everything else inbound/forwarded; allow outbound.
"$IPT" -P INPUT DROP
"$IPT" -P FORWARD DROP
"$IPT" -P OUTPUT ACCEPT
# NOTE(review): 'iptables-save' only prints the ruleset to stdout; redirect
# it to /etc/sysconfig/iptables (or run 'service iptables save') to persist.
iptables-save
"$IPT" -L -v
iptables - How-to - With Examples
Enable the service on persistent reboot.
chkconfig iptables on
How to Start and Stop service
/etc/init.d/iptables start
/etc/init.d/iptables start
/etc/init.d/iptables status
Following are the possible special values that you can specify in the target.
ACCEPT – Firewall will accept the packet.
DROP – Firewall will drop the packet.
QUEUE – Firewall will pass the packet to the userspace.
RETURN – Firewall will stop executing the next set of rules in the current chain for this packet.
The control will be returned to the calling chain.
List the rules
# iptables -L -n -v
The rules in the iptables –list command output contains the following fields:
num – Rule number within the particular chain
target – Special target variable that we discussed above
prot – Protocols. tcp, udp, icmp, etc.,
opt – Special options for that specific rule.
source – Source ip-address of the packet
destination – Destination ip-address for the packet
Before you start building new set of rules, you might want to clean-up all the default rules, and existing rules.
# iptables -F
(or)
# iptables --flush
The default chain policy is ACCEPT. Change this to DROP for all INPUT, FORWARD,
and OUTPUT chains as shown below.
#iptables -P INPUT DROP
#iptables -P FORWARD DROP
#iptables -P OUTPUT DROP
# Local loop
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT
# Connections already established
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
# PING
iptables -A INPUT -p icmp --icmp-type 8 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
iptables -A OUTPUT -p icmp --icmp-type 8 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
# DNS
# UDP
iptables -A INPUT -i eth0 -p udp --sport 53 -m state --state ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p udp --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
# TCP
iptables -A INPUT -i eth0 -p tcp --sport 53 -m state --state ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
# SSH
# Incoming
iptables -A INPUT -i eth0 -p tcp --dport 22 -m state --state NEW,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --sport 22 -m state --state ESTABLISHED -j ACCEPT
# Outgoing
iptables -A INPUT -i eth0 -p tcp --sport 22 -m state --state ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --dport 22 -m state --state NEW,ESTABLISHED -j ACCEPT
# HTTP
Incoming
iptables -A INPUT -i eth0 -p tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --sport 80 -m state --state ESTABLISHED -j ACCEPT
Outgoing
iptables -A INPUT -i eth0 -p tcp --sport 80 -m state --state ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --dport 80 -m state --state NEW,ESTABLISHED -j ACCEPT
#HTTPS
Incoming
iptables -A INPUT -i eth0 -p tcp --dport 443 -m state --state NEW,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --sport 443 -m state --state ESTABLISHED -j ACCEPT
Outgoing
iptables -A INPUT -i eth0 -p tcp --sport 443 -m state --state ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --dport 443 -m state --state NEW,ESTABLISHED -j ACCEPT
# FTP
Incoming
iptables -A INPUT -i eth0 -p tcp --dport 21 -m state --state NEW,ESTABLISHED -j ACCEPT
iptables -A OUTPUT -o eth0 -p tcp --sport 21 -m state --state ESTABLISHED -j ACCEPT
Logs
iptables -A INPUT -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 4
At the End DROP everything else except above rules
DROP everything else
iptables -A INPUT -j DROP
iptables -A OUTPUT -j DROP
iptables -A FORWARD -j DROP
-- eth0 should be replaced with respective device.
-- Only use required rules on your device.
Table 11-3. DSCP target options
Option
--set-dscp
Example
iptables -A OUTPUT -p tcp --dport 53 -j DSCP --set-dscp 1
Explanation
This sets the DSCP value to the specified value. The values can be set either via class, see below, or with the --set-dscp, which takes either an integer value, or a hex value.
Option
--set-dscp-class
Example
iptables -A OUTPUT -p tcp --dport 53 -j DSCP --set-dscp-class AF21
Explanation
This sets the DSCP field according to a predefined DiffServ class. Some of the possible values are EF, BE and the CSxx and AFxx values available. You can find more information at Implementing Quality of Service Policies with DSCP site. Do note that the --set-dscp-class and --set-dscp commands are mutually exclusive, which means you can not use both of them in the same command!
iptables -t mangle -A OUTPUT -p udp -m udp --sport <SRCPRT> --dport 53 -j DSCP --set-dscp-class AF21
Please use the following command to allow outgoing DNS requests.
iptables -A OUTPUT -p udp -m udp --dport 53 -j ACCEPT
iptables -A OUTPUT -p udp -m udp --sport 53 -j DSCP --set-dscp 0x10
The iptables firewall can modify packets in the mangle table.
For locally generated traffic, use the OUTPUT or POSTROUTING chain.
Some possible examples are:
# iptables -t mangle -A OUTPUT -p udp --dport 53 -j DSCP --set-dscp-class AF11
# iptables -t mangle -A POSTROUTING -p udp -m udp --sport 8000:8009 -j DSCP --set-dscp 0x0a
We can make use of the mangle table - which allows us to modify packets before they leave the system.
iptables -t mangle -A OUTPUT -j DSCP --set-dscp-class AF21 -m comment --comment "set dscp class to AF21 for all outbound traffic"
FIREWALLD - The firewalld-cmd utility:
firewalld has the following advantages over iptables :
1. Unlike the iptables command, the firewall-cmd command does not restart the firewall and disrupt established TCP connections.
2. firewalld supports dynamic zones.
3. firewalld supports D-Bus for better integration with services that depend on firewall configuration.
Configuration options
The firewalld service has two types of configuration options:
1. Runtime: Changes to firewall settings take effect immediately but are not permanent. Changes made in runtime configuration mode are lost when the firewalld service is restarted.
2. Permanent: Changes to firewall settings are written to configuration files. These changes are applied when the firewalld service restarts.
Configuration files
Configuration files for firewalld exist in two directories:
/usr/lib/firewalld: Contains default configuration files. Do not make changes to these files. An upgrade of the firewalld package overwrites this directory.
/etc/firewalld: Changes to the default configuration files are stored in this directory.Files in this directory overload the default configuration files.
firewalld zones
The firewalld service allows you to separate networks into different zones based on the level of trust you want to place on the devices and traffic within a specific network. For each zone you can define the following features:
Services: Predefined or custom services to trust. Trusted services are a combination of ports and protocols that are accessible from other systems and networks.
Ports: Additional ports or port ranges and associated protocols that are accessible from other systems and networks.
Masquerading: Translate IPv4 addresses to a single external address. With masquerading enabled, addresses of a private network are mapped to and hidden behind a public address.
Port Forwarding: Forward inbound network traffic from a specific port or port range to an alternative port on the local system, or to a port on another IPv4 address.
ICMP Filter: Block selected Internet Control Message Protocol messages.
Rich Rules: Extend existing firewalld rules to include additional source and destination addresses and logging and auditing actions.
Interfaces: Network interfaces bound to the zone. The zone for an interface is specified with the ZONE=option in the /etc/sysconfig/network-scripts/ifcfg file. If the option is missing, the interface is bound to the default zone.
Predefined firewalld Zones
The firewalld software package includes a set of predefined network zones in the following directory:
# ls -lrt /usr/lib/firewalld/zones/
total 36
-rw-r----- 1 root root 342 Sep 15 2015 work.xml
-rw-r----- 1 root root 162 Sep 15 2015 trusted.xml
-rw-r----- 1 root root 315 Sep 15 2015 public.xml
-rw-r----- 1 root root 415 Sep 15 2015 internal.xml
-rw-r----- 1 root root 400 Sep 15 2015 home.xml
-rw-r----- 1 root root 304 Sep 15 2015 external.xml
-rw-r----- 1 root root 291 Sep 15 2015 drop.xml
-rw-r----- 1 root root 293 Sep 15 2015 dmz.xml
-rw-r----- 1 root root 299 Sep 15 2015 block.xml
The zone files contain preset settings, which can be applied to a network interface. For example:
# grep -i service /usr/lib/firewalld/zones/public.xml
<service name="ssh"/>
<service name="dhcpv6-client"/>
In this example, network interfaces bound to the public zone trust only two services, ssh and dhcpv6-client.
A brief explanation of each zone follows:
drop: Any incoming network packets are dropped, there is no reply. Only outgoing
network connections are possible.
block: Any incoming network connections are rejected with an icmp-host- prohibited message for IPv4 and icmp6-adm-prohibited for IPv6. Only network connections initiated from within the system are possible.
home: For use in home areas. You mostly trust the other computers on networks to not harm your computer. Only selected incoming connections are accepted.
public: For use in public areas. You do not trust the other computers on the network to not harm your computer. Only selected incoming connections are accepted.
work: For use in work areas. You mostly trust the other computers on networks to not harm your computer. Only selected incoming connections are accepted.
dmz: For computers in your demilitarized zone that are publicly accessible with limited access to your internal network. Only selected incoming connections are accepted.
external: For use on external networks with masquerading enabled especially for routers. You do not trust the other computers on the network to not harm your computer. Only selected incoming connections are accepted.
internal: For use on internal networks. You mostly trust the other computers on the networks to not harm your computer. Only selected incoming connections are accepted.
trusted: All network connections are accepted.
Setting the Default firewalld Zone
After an initial installation, the public zone is the default zone as specified in the configuration file, /etc/firewalld/firewalld.conf.
# grep -i defaultzone /etc/firewalld/firewalld.conf
DefaultZone=public
Network interfaces are bound to the default zone unless specified with ZONE=[zone] in the ifcfg file. The following command shows the interfaces that are bound to the public zone:
# firewall-cmd --get-active-zones
public
interfaces: eth0 eth1
You can use the firewall-cmd command to change the default zone:
# firewall-cmd --set-default-zone=work
success
You can also use the firewall-config GUI to change the default zone. From the menu bar, select Options->Change Default Zone, and then select a zone from a pop-up list.
firewalld Services
– A firewalld service is a combination of local ports and protocols and destination addresses.
– A firewalld service can also include Netfilter kernel modules that are automatically loaded when a service is enabled.
– The firewalld software package includes a set of predefined services in the following directory:
# ls /usr/lib/firewalld/services/
(one XML file per predefined service, e.g. ssh.xml, http.xml, https.xml, dns.xml, samba.xml — the zones listing shown earlier belongs to /usr/lib/firewalld/zones/)
– Services can be enabled for a zone in Runtime mode.
– Service definitions can only be edited in Permanent mode.
Start firewalld
To start firewalld:
# systemctl start firewalld
To ensure firewalld starts at boot time:
# systemctl enable firewalld
To check if firewalld is running:
# systemctl status firewalld
# firewall-cmd --state
Three methods to configure the firewalld service:
– firewall-cmd : Command-line interface
– firewall-config : Graphical user interface
– Edit various XML configuration files.
CentOS / RHEL 7/8 : How to start / Stop Firewalld
The command-line tool firewall-cmd is part of the firewalld application, which is installed by default. To get help on the firewall-cmd command:
# firewall-cmd --help
The firewall-cmd command offers categories of options such as General, Status, Permanent, Zone, IcmpType, Service, Adapt and Query Zones, Direct, Lockdown, Lockdown Whitelist, and Panic. To list information for all zones:
# firewall-cmd --list-all-zones public (default, active)
interfaces: eth0 eth1
sources:
services: dhcpv6-client ssh
ports:
...
To permit access by HTTP clients for the public zone:
# firewall-cmd --zone=public --add-service=http
success
To list services that are allowed for the public zone:
# firewall-cmd --zone=public --list-services
dhcpv6-client http ssh
Using this command only changes the Runtime configuration and does not update the configuration files.
The configuration changes made in Runtime configuration mode are lost when the firewalld service is restarted:
# systemctl restart firewalld
# firewall-cmd --zone=public --list-services
dhcpv6-client ssh
To make changes permanent, use the --permanent option. Example:
# firewall-cmd --permanent --zone=public --add-service=http
success
Changes made in Permanent configuration mode are not implemented immediately. However, changes made in Permanent configuration are written to configuration files. Restarting the firewalld service reads the configuration files and implements the changes. Example:
# systemctl restart firewalld
# firewall-cmd --zone=public --list-services
dhcpv6-client http ssh
Set up Magento 2 with Redis, Varnish and Nginx as SSL termination
Table of Contents
Login to your VPS via SSH
Update the system and install necessary packages
Install MariaDB 10.6
Install PHP 7.4, composer and all required PHP modules
Install Magento 2 from Github
Install and configure Nginx
Install and configure Varnish
Install and configure Redis caching
Further Optimizations
Login to your VPS via SSH
ssh my_sudo_user@my_server
Update the system and install necessary packages
sudo apt-get update && sudo apt-get -y upgrade
sudo apt-get -y install curl nano git
Install MariaDB 10.6
Install the latest MariaDB server from the official Ubuntu repositories:
sudo apt-get install -y mariadb-server-10.6
When the installation is complete, run the following command to secure your installation:
mysql_secure_installation
Next, we need to create a database for our Magento installation.
mysql -uroot -p
MariaDB [(none)]> CREATE DATABASE magento;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON magento.* TO 'magento'@'localhost' IDENTIFIED BY 'my_strong_password';
MariaDB [(none)]> FLUSH PRIVILEGES;
MariaDB [(none)]> \q
Install PHP 7.4, composer and all required PHP modules
To install the latest stable version of PHP 7.4 and all necessary modules, run:
sudo apt-get -y install php-fpm php-cli php-gd php-imagick php-mysql php-mcrypt php-pear php-curl php-intl php-xsl php-zip php-mbstring
sudo sed -i "s/memory_limit = .*/memory_limit = 256M/" /etc/php/7.4/fpm/php.ini
sudo sed -i "s/upload_max_filesize = .*/upload_max_filesize = 128M/" /etc/php/7.4/fpm/php.ini
sudo sed -i "s/zlib.output_compression = .*/zlib.output_compression = on/" /etc/php/7.4/fpm/php.ini
sudo sed -i "s/max_execution_time = .*/max_execution_time = 18000/" /etc/php/7.4/fpm/php.ini
The composer is a dependency manager for PHP with which you can install packages. The composer will pull in all the required libraries and dependencies you need for your project.
curl -sS https://getcomposer.org/installer | php
sudo mv composer.phar /usr/local/bin/composer
Install Magento 2 from Github
Clone the Magento repository to the ~/myMagentoSite.com directory using the following command:
sudo git clone https://github.com/magento/magento2.git /var/www/myMagentoSite.com
Get the latest stable release, at the time of the writing it’s Magento 2.4.4:
cd /var/www/myMagentoSite.com
sudo git checkout $(git describe --tags $(git rev-list --tags --max-count=1))
Run composer to install all Magento dependencies:
sudo composer install
To continue with the installation you can either use the installation wizard or the command line, in this guide we will use the latter.
sudo bin/magento setup:install \
--base-url=http://myMagentoSite.com/ \
--db-host=localhost \
--db-name=magento \
--db-user=magento \
--db-password=my_strong_password \
--admin-firstname=First \
--admin-lastname=Last \
--admin-email=user@myMagentoSite.com \
--admin-user=admin \
--admin-password=my_strong_password123 \
--language=en_US \
--currency=USD \
--timezone=America/Chicago \
--use-rewrites=1
If the installation is successful you will see something like below:
[SUCCESS]: Magento installation complete.
[SUCCESS]: Magento Admin URI: /admin_mejj1n
Run the crontab command to create a cronjob
crontab -u www-data -e
and add the following line:
* * * * * /usr/bin/php /var/www/myMagentoSite.com/bin/magento cron:run | grep -v "Ran jobs by schedule" >> /var/www/myMagentoSite.com/var/log/magento.cron.log
Finally, set the correct permissions:
sudo chown -R www-data: /var/www/myMagentoSite.com
Install and configure Nginx
Install Nginx from the official Ubuntu repositories::
sudo apt-get -y install nginx
Create a new Nginx server block with the following content:
sudo nano /etc/nginx/sites-available/myMagentoSite.com
upstream fastcgi_backend {
server unix:/run/php/php7.4-fpm.sock;
}
server {
server_name myMagentoSite.com www.myMagentoSite.com;
listen 80;
set $MAGE_ROOT /var/www/myMagentoSite.com;
set $MAGE_MODE developer; # or production
access_log /var/log/nginx/myMagentoSite.com-access.log;
error_log /var/log/nginx/myMagentoSite.com-error.log;
include /var/www/myMagentoSite.com/nginx.conf.sample;
}
Activate the server block by creating a symbolic link :
sudo ln -s /etc/nginx/sites-available/myMagentoSite.com /etc/nginx/sites-enabled/myMagentoSite.com
Delete the default configuration:
sudo rm -f /etc/nginx/sites-enabled/default
Test the Nginx configuration and restart nginx:
sudo nginx -t
sudo service nginx restart
You should be now able to login to your Magento back-end by going to http://myMagentoSite.com/admin_mejj1n using the information you set when running the bin/magento setup:install .
Install and configure Varnish
Installing Varnish is as simple as running the following command:
sudo apt-get install varnish
From your Magento Admin dashboard click on the STORES link (left sidebar) -> Configuration -> ADVANCED -> System -> Full Page Cache
Unselected Use system value and from the Caching Application list, select Varnish Cache (Recommended), save the configuration, click on the Varnish Configuration link and click on the Export VCL for Varnish 4 button. The varnish.vcl file which we will use will be exported in the directory /var/www/myMagentoSite.com/var/.
Flush the Magento cache with:
sudo php bin/magento cache:flush
Delete the /etc/varnish/default.vcl and symlink it to the exported varnish configuration.
sudo rm -f /etc/varnish/default.vcl
sudo ln -sf /var/www/myMagentoSite.com/var/varnish.vcl /etc/varnish/default.vcl
To change the varnish port from 6081 to 80, we need to edit the systemd service configuration.
Create a new customexec.conf file
sudo mkdir -p /etc/systemd/system/varnish.service.d
sudo nano /etc/systemd/system/varnish.service.d/customexec.conf
paste the following:
[Service]
ExecStart=
ExecStart=/usr/sbin/varnishd -j unix,user=vcache -F -a :80 -T localhost:6082 -f /etc/varnish/default.vcl -S /etc/varnish/secret -s malloc,256m
and reload systemd units
sudo systemctl daemon-reload
Now we need to change Nginx listening port from 80 to 8080 and enable Nginx SSL termination with HTTP2, to do that open the Nginx configuration file and change it as follows:
sudo nano /etc/nginx/sites-available/myMagentoSite.com
upstream fastcgi_backend {
server unix:/run/php/php7.4-fpm.sock;
}
server {
server_name myMagentoSite.com www.myMagentoSite.com;
listen 8080;
set $MAGE_ROOT /var/www/myMagentoSite.com;
set $MAGE_MODE production; # or developer
access_log /var/log/nginx/myMagentoSite.com-access.log;
error_log /var/log/nginx/myMagentoSite.com-error.log;
include /var/www/myMagentoSite.com/nginx.conf.sample;
}
server {
listen 443 ssl http2;
server_name myMagentoSite.com www.myMagentoSite.com;
ssl_certificate /etc/ssl/certs/ssl-cert-snakeoil.pem; # change with your SSL cert
ssl_certificate_key /etc/ssl/private/ssl-cert-snakeoil.key; # change with your SSL key
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_ciphers 'AES128+EECDH:AES128+EDH:!aNULL';
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 24h;
keepalive_timeout 300s;
location / {
proxy_pass http://127.0.0.1;
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Ssl-Offloaded "1";
proxy_set_header X-Forwarded-Proto https;
proxy_set_header X-Forwarded-Port 443;
#proxy_hide_header X-Varnish;
#proxy_hide_header Via;
proxy_set_header X-Forwarded-Proto $scheme;
}
}
If you don’t already have an SSL certificate, you can purchase a trusted SSL certificate.
Restart Varnish and Nginx:
sudo systemctl restart nginx
sudo systemctl restart varnish
Change the base URL to https and flush the cache
sudo php bin/magento setup:store-config:set --base-url="https://myMagentoSite.com"
sudo php bin/magento cache:flush
If everything is setup correctly now you should be able to login to your Magento back-end by going to https://myMagentoSite.com/admin_mejj1n.
Install and configure Redis caching
Redis is a key-value in memory data store and we will use it to replace the default Magento 2 Zend_Cache_Backend_File backend cache. Install Redis by running the following command:
sudo apt-get install php-redis redis-server
To configure your Magento installation to use Redis for session storage open the app/etc/env.php file and change/add the following:
sudo nano /var/www/myMagentoSite.com/app/etc/env.php
change:
'session' =>
array (
'save' => 'files',
),
with:
'session' =>
array (
'save' => 'redis',
'redis' =>
array (
'host' => '127.0.0.1',
'port' => '6379',
'password' => '',
'timeout' => '2.5',
'persistent_identifier' => '',
'database' => '0',
'compression_threshold' => '2048',
'compression_library' => 'gzip',
'log_level' => '1',
'max_concurrency' => '6',
'break_after_frontend' => '5',
'break_after_adminhtml' => '30',
'first_lifetime' => '600',
'bot_first_lifetime' => '60',
'bot_lifetime' => '7200',
'disable_locking' => '0',
'min_lifetime' => '60',
'max_lifetime' => '2592000'
)
),
and to use Redis for page caching add:
'cache' =>
array(
'frontend' =>
array(
'default' =>
array(
'backend' => 'Cm_Cache_Backend_Redis',
'backend_options' =>
array(
'server' => '127.0.0.1',
'port' => '6379'
),
),
'page_cache' =>
array(
'backend' => 'Cm_Cache_Backend_Redis',
'backend_options' =>
array(
'server' => '127.0.0.1',
'port' => '6379',
'database' => '1',
'compress_data' => '0'
)
)
)
),
Finally flush the cache again:
sudo php bin/magento cache:flush
Further Optimizations
To further optimize your Magento installation from you Magento admin dashboard:
1. Go to STORES -> Configuration -> CATALOG -> Catalog -> Use Flat Catalog Category, select Yes and click Save Config.
2. Go to STORES -> Configuration -> ADVANCED -> Developer -> JavaScript Settings and set both Merge JavaScript Files and Minify JavaScript Files to Yes and click Save Config..
3. Go to STORES -> Configuration -> ADVANCED -> Developer -> CSS Settings and set both Merge CSS Files and Minify CSS Files to Yes and click Save Config.
4. Consider using a CDN – Content Delivery Network
Do not forget to flush the cache:
sudo php bin/magento cache:flush
That’s it. You have successfully installed Magento 2 with Redis as a session storage and page caching, Varnish as a full page caching and Nginx as SSL termination on your Ubuntu 16.04 VPS. For more information about how to manage your Magento installation, please refer to the official Magento documentation.
Of course, you don’t have to do any of this if you use one of our Magento VPS Hosting services, in which case you can simply ask our expert Linux admins to setup this for you. They are available 24×7 and will take care of your request immediately.
PS. If you liked this post please share it with your friends on the social networks using the buttons on the left or simply leave a reply below. Thanks.
RED-HAT SATELLITE AND SPACEWALK
Register system to your spacewalk
subscription-manager Unable to verify server's identity: certificate verify failed
Registering to: dcplabsat8.local:8443/rhsm
Unable to verify server's identity: certificate verify failed
Fix =
Update following config to point correct URL.
[root@web-ser1 tmp]# vim /etc/rhsm/rhsm.conf
# Server hostname:
If you feel, conf file mess up then simply remove following package & install again.
All should be fine.
# yum remove python-rhsm subscription-manager
# yum clean all
[root@localhost ~]# subscription-manager release --set=x.y No releases match 'x.y'. Consult 'release --list' for a full listing.
Resolution
Reinstall
the subscription-manager package and then set the release. If the same issue is noticed even after re-installing the subscription-manager package, then set the release while registering the system using the following commands.
Satellite 6: How to enable and synchronize repositories via command line?
hammer shell --username <username of satellite admin account> --password <password>
hammer> product list --organization-label Default_Organization
hammer> repository-set list --organization-label Default_Organization --product "Red Hat Enterprise Linux Server"
hammer > repository-set available-repositories --organization-label Default_Organization --product "Red Hat Enterprise Linux Server" --id 168
For enable the base repo
repository-set enable --organization-label Default_Organization --product "Red Hat Enterprise Linux Server" --id 168 --releasever 6Server --basearch x86_64
For enable any child repo
repository-set enable --organization-label Default_Organization --product "Red Hat Enterprise Linux Server" --id 168 --releasever 6.3 --basearch x86_64
hammer> repository synchronize --async --organization-label Default_Organization --product "Red Hat Enterprise Linux Server" --id 1
Create a Local Yum Repository for Oracle Linux 8
Configure Server Repositories
Repository Creation
Resync the Repository
Setup the HTTP Server
Point Servers to the Local Repository
Make sure the repositories of interest are available on the server,
# vim oel8-tmp.repo
[ol8_baseos_latest]
name=Oracle Linux $releasever BaseOS ($basearch)
baseurl=https://yum.oracle.com/repo/OracleLinux/OL8/baseos/latest/$basearch
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
gpgcheck=1
enabled=1
Install OEL8 related repo files from below command
# dnf reinstall oraclelinux-release-el8
# dnf clean packages
# dnf install oracle-epel-release-el8.x86_64 oracle-gluster-release-el8.x86_64 oracle-spacewalk-client-release-el8.x86_64 oraclelinux-developer-release-el8.x86_64 oraclelinux-release-el8.x86_64
# dnf install oracle-epel-release-el8.x86_64 oracle-gluster-release-el8.x86_64 oracle-spacewalk-client-release-el8.x86_64 oraclel
# curl https://yum.oracle.com/RPM-GPG-KEY-oracle-ol8 -o RPM-GPG-KEY-oracle
# gpg --quiet --keyid-format 0xlong --with-fingerprint RPM-GPG-KEY-oracle
# yum install yum-utils createrepo
This may install some additional packages to support other repositories, depending on what repositories you already had enabled before you started the process. Let's take a look at the contents of the "/etc/yum.repos.d" directory now.
-rw-r--r--. 1 root root 216 Sep 22 08:49 oel8-tmp.repo
-rw-r--r--. 1 root root 212 Sep 24 12:41 ol8-addon.repo
-rw-r--r--. 1 root root 252 Jul 23 05:50 oracle-epel-ol8.repo
-rw-r--r--. 1 root root 246 Mar 4 2020 oracle-gluster-ol8.repo
-rw-r--r--. 1 root root 459 Dec 13 2019 oraclelinux-developer-ol8.repo
-rw-r--r--. 1 root root 1565 Apr 28 21:05 oracle-linux-ol8.repo
-rw-r--r--. 1 root root 249 Aug 5 00:59 oracle-spacewalk-client-ol8.repo
-rw-r--r--. 1 root root 470 Jul 23 05:50 uek-ol8.repo
Repository Creation
Install the following packages, which include the utilities necessary to set up the repository.
# yum install yum-utils createrepo
Create the following directories to hold the main OS and UEK repositories.
# mkdir -p /u01/repo/OracleLinux
# mkdir -p /u01/repo/logs
# mkdir -p /u01/repo/scripts
If you've done a default installation of Oracle Linux 8, the "ol8_baseos_latest" and "ol8_UEKR6" repositories should already be enabled in the "/etc/yum.repos.d/public-yum-ol8.repo" file, but it's worth checking before you continue.
The reposync command is used to synchronize a remote yum repository to a local directory, using yum to retrieve the packages.
#/usr/bin/reposync --newest-only --repoid=ol8_baseos_latest -p /u01/repo/OracleLinux
# /usr/bin/reposync --newest-only --repoid=ol8_UEKR6 -p /u01/repo/OracleLinux
# /usr/bin/reposync --newest-only --repoid=ol8_appstream -p /u01/repo/OracleLinux
# /usr/bin/reposync --newest-only --repoid=ol8_spacewalk210_client --repoid=ol8_gluster_appstream -p /u01/repo/OracleLinux
# /usr/bin/reposync --newest-only --repoid=ol8_developer -p /u01/repo/OracleLinux
# /usr/bin/reposync --newest-only --repoid=ol8_addons -p /u01/repo/OracleLinux
It takes a long time to sync the repositories the first time, so be patient. I waited overnight for the 32G of downloads to complete. Subsequent refreshes only bring across the changed packages, so they are much quicker. The "newest-only" option reduces the total size of the download.
Once complete, you can create the repositories from the local directories using the createrepo command.
# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_baseos_latest/getPackage
# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_appstream/getPackage
# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_developer/getPackage
# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_gluster_appstream/getPackage
# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_addons/getPackage
# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_developer_EPEL/getPackage
# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_spacewalk210_client/getPackage
# /usr/bin/createrepo /u01/repo/OracleLinux/ol8_UEKR6/getPackage
Resync the Repository
A resync of the Yum repositories involves repeating the reposync and createrepo commands, so you should script them and run them from CRON. Create a script called "/u01/repo/scripts/repo_sync.sh" with the following contents.
#!/bin/bash
# repo_sync.sh - refresh the local Oracle Linux 8 yum mirror.
#
# Runs reposync for every mirrored repository, then rebuilds the
# repodata with createrepo. Intended to be run daily from cron.
# All output is appended to a dated log file under /u01/repo/logs;
# logs older than 5 days are removed.
#
# Fixes over the original version:
#   - the find(1) cleanup had a stray ';' before the redirection, so its
#     output was never logged (and a bare glob errored when no logs matched)
#   - createrepo was run against the nonexistent "ol8_developer4" directory
#     instead of "ol8_developer"
#   - ol8_appstream was synced but its repodata was never rebuilt

LOG_FILE=/u01/repo/logs/repo_sync_$(date +%Y.%m.%d).log
REPO_BASE=/u01/repo/OracleLinux

# Every repository mirrored locally; each id must be enabled in
# /etc/yum.repos.d on this host.
REPO_IDS=(
  ol8_baseos_latest
  ol8_appstream
  ol8_developer
  ol8_developer_EPEL
  ol8_spacewalk210_client
  ol8_gluster_appstream
  ol8_UEKR6
  ol8_addons
)

# Remove old logs (keep the last 5 days).
find /u01/repo/logs -name 'repo_sync*' -mtime +5 -delete >> "$LOG_FILE" 2>&1

# Sync each repository, newest packages only.
for repo in "${REPO_IDS[@]}"; do
  /usr/bin/reposync --newest-only --repoid="$repo" -p "$REPO_BASE" >> "$LOG_FILE" 2>&1
done

# Rebuild the repodata for every synced repository.
for repo in "${REPO_IDS[@]}"; do
  /usr/bin/createrepo "$REPO_BASE/$repo/getPackage/" >> "$LOG_FILE" 2>&1
done
Make the file executable.
# chmod u+x /u01/repo/scripts/repo_sync.sh
Set up a CRON job to run the script on a daily basis. The following entry runs the script each day at 01:00.
0 1 * * * /u01/repo/scripts/repo_sync.sh > /dev/null 2>&1
Setup the HTTP Server
Install the Apache HTTP servers, start it and make sure it restarts automatically on reboot.
# yum install httpd
# systemctl start httpd
# systemctl enable httpd
If you are using the Linux firewall you will need to punch a hole for port 80.
# firewall-cmd --permanent --zone=public --add-port=80/tcp
# firewall-cmd --reload
Either set SELinux to permissive, or configure the fcontext for the repository files as shown below.
# # One-off configuration.
# yum install policycoreutils-python -y
# semanage fcontext -a -t httpd_sys_content_t "/u01/repo/OracleLinux(/.*)?"
# # Run each time the repo contents change.
# restorecon -F -R -v /u01/repo/OracleLinux
Present the repositories using the HTTP server.
# mkdir -p /var/www/html/repo/OracleLinux/ol8_latest
# mkdir -p /var/www/html/repo/OracleLinux/ol8_developer
# mkdir -p /var/www/html/repo/OracleLinux/ol8_developer_EPEL
# mkdir -p /var/www/html/repo/OracleLinux/ol8_spacewalk210_client
# mkdir -p /var/www/html/repo/OracleLinux/ol8_UEKR6
# mkdir -p /var/www/html/repo/OracleLinux/ol8_appstream
# mkdir -p /var/www/html/repo/OracleLinux/ol8_gluster_appstream
# mkdir -p /var/www/html/repo/OracleLinux/ol8_addons
# ln -s /u01/repo/OracleLinux/ol8_latest/getPackage /var/www/html/repo/OracleLinux/ol8_latest/x86_64
# ln -s /u01/repo/OracleLinux/ol8_developer/getPackage /var/www/html/repo/OracleLinux/ol8_developer/x86_64
# ln -s /u01/repo/OracleLinux/ol8_developer_EPEL/getPackage /var/www/html/repo/OracleLinux/ol8_developer_EPEL/x86_64
# ln -s /u01/repo/OracleLinux/ol8_spacewalk210_client/getPackage /var/www/html/repo/OracleLinux/ol8_spacewalk210_client/x86_64
# ln -s /u01/repo/OracleLinux/ol8_gluster_appstream/getPackage /var/www/html/repo/OracleLinux/ol8_gluster_appstream/x86_64
# ln -s /u01/repo/OracleLinux/ol8_UEKR6/getPackage /var/www/html/repo/OracleLinux/ol8_UEKR6/x86_64
# ln -s /u01/repo/OracleLinux/ol8_appstream/getPackage /var/www/html/repo/OracleLinux/ol8_appstream/x86_64
# ln -s /u01/repo/OracleLinux/ol8_addons/getPackage /var/www/html/repo/OracleLinux/ol8_addons/x86_64
Point Servers to the Local Repository
To allow a server to use the local Yum repositories, create a file called "/etc/yum.repos.d/local-ol8.repo" with the following contents, where "ol8-yum.localdomain" is the name of the server with the Yum repositories.
[local_ol8_baseos_latest]
name=Oracle Linux $releasever Latest ($basearch)
baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_latest/$basearch/
gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8
gpgcheck=1
enabled=1
[local_ol8_developer]
name=Latest Oracle Linux $releasever Development Packages ($basearch)
baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_developer/$basearch/
gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8
gpgcheck=1
enabled=1
[local_ol8_developer_EPEL]
name=Latest Oracle Linux $releasever EPEL Packages for Development ($basearch)
baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_developer_EPEL/$basearch/
gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8
gpgcheck=1
enabled=1
[local_ol8_appstream]
name=Oracle Linux $releasever Application Stream ($basearch)
baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_appstream/$basearch/
gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8
gpgcheck=1
enabled=1
[local_ol8_gluster_appstream]
name=Oracle Linux $releasever Gluster Appstream ($basearch)
baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_gluster_appstream/$basearch/
gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8
gpgcheck=1
enabled=1
[local_ol8_spacewalk210_client]
name=Latest Spacewalk Client 2.10 for Oracle Linux 8 ($basearch)
baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_spacewalk210_client/$basearch/
gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8
gpgcheck=1
enabled=1
[local_ol8_UEKR6]
name=Latest Unbreakable Enterprise Kernel for Oracle Linux $releasever ($basearch)
baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_UEKR6/$basearch/
gpgkey=http://ol8-yum.localdomain/RPM-GPG-KEY-oracle-ol8
gpgcheck=1
enabled=1
[ol8_addons]
name=Oracle Linux $releasever Addons Packages ($basearch)
baseurl=http://ol8-yum.localdomain/repo/OracleLinux/ol8_addons/$basearch/
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-oracle
gpgcheck=1
enabled=1
You may also want to consider installing the following package, to make sure you pick the fastest mirror, which should be your local one.
# yum install yum-plugin-fastestmirror
Essential Git Commands
Categories are as follows:
- To create
- To make local changes
- To commit history
- Branches and tags
- To update and publish
- To merge and reuse
- To undo
Let us do define commands that do fall under these categories that are listed below as follows:
Type 1: CREATE
- Clone an existing repository: git clone
- Create a new local repository: git init
Type 2: LOCAL CHANGES
- Changed files in your working directory: git status
- Changes to tracked files: git diff
- Add all current changes to the next commit: git add
- Add some changes to the next commit: git add -p
- Commit all local changes In tracked files: git commit -a
- Commit previously staged changes: git commit
- Change the last commit: git commit --amend
Type 3: COMMIT HISTORY
- Show all commits, starting with newest: git log
- Show changes over time for a specific file: git log -p
- Who changed what and when in: git blame
Type 4: BRANCHES & TAGS
- List all existing branches: git branch -av
- Switch HEAD branch: git checkout
- Create a new branch based on your current HEAD: git branch
- Create a new tracking branch based on a remote branch: git checkout --track
- Delete a local branch: git branch -d
- Mark the current commit with a tag: git tag
Type 5: UPDATE and PUBLISH
- List all currently configured remotes: git remote -v
- Show Information about a remote: git remote show
- Add new remote repository, named remote: git remote add
- Download all changes from but don’t integrate into HEAD: git fetch
- Download changes and directly merge/integrate into HEAD: git pull
- Publish local changes on a remote: git push
- Delete a branch on the remote: git branch -dr
- Publish your tags: git push --tags
Type 6: MERGE & REUSE
- Merge into your current HEAD: git merge
- Rebase your current HEAD onto git rebase
- Abort a rebase: git rebase --abort
- Continue a rebase after resolving conflicts: git rebase --continue
- Use your configured merge tool to solve conflicts: git mergetool
- Use your editor to manually solve conflicts and (after resolving) mark file as resolved: git add, git rm
Type 7: UNDO
- Discard all local changes in your working directory: git reset --hard HEAD
- Discard local changes in a specific file: git checkout HEAD
- Revert a commit (by producing a new commit with contrary changes): git revert
- Reset your HEAD pointer to a previous commit and discard all changes since then: git reset --hard
- Preserve all changes as unstaged changes: git reset
- Preserve uncommitted local changes: git reset --keep
The Open SSH
OpenSSH is an implementation of the SSH protocol supported by Linux, UNIX, and similar operating systems. It includes the core files necessary for both the OpenSSH client and server. The OpenSSH suite consists of the following user-space tools:
ssh is a remote login program (SSH client). sshd is the OpenSSH SSH daemon. scp is a secure remote file copy program. sftp is a secure file transfer program. ssh-agent is an authentication agent for caching private keys. ssh-add adds private key identities to ssh-agent. ssh-keygen generates, manages, and converts authentication keys for ssh. ssh-copy-id is a script that adds local public keys to the authorized_keys file on a remote SSH server. ssh-keyscan gathers SSH public host keys.
Two versions of SSH currently exist: version 1, and the newer version 2. The OpenSSH suite in RHEL supports only SSH version 2. It has an enhanced key-exchange algorithm that is not vulnerable to exploits known in version 1.
OpenSSH, as one of core cryptographic subsystems of RHEL, uses system-wide crypto policies. This ensures that weak cipher suites and cryptographic algorithms are disabled in the default configuration. To modify the policy, the administrator must either use the update-crypto-policies command to adjust the settings or manually opt out of the system-wide crypto policies.
Restricting access to specific users, groups, or domains
The AllowUsers and AllowGroups directives in the /etc/ssh/sshd_config configuration file enable you to permit only certain users, domains, or groups to connect to your OpenSSH server. You can combine AllowUsers and AllowGroups to restrict access more precisely, for example:
AllowUsers *@192.168.1.*,*@10.0.0.*,!*@192.168.1.2
AllowGroups example-group
Linux Log Storage
In Linux, logs are typically stored in the /var/log directory or its subdirectories. The specific location and naming conventions of log files can vary depending on the Linux distribution and the software components installed on the system.
Some of the common log file locations:
/var/log/syslog or /var/log/messages: These files contain general system log messages, including kernel messages and system service status.
/var/log/auth.log or /var/log/secure: These files store authentication-related events, such as login attempts, authentication failures, and user activity logs.
/var/log/apache2/access.log or /var/log/nginx/access.log: These files are specific to web servers like Apache or Nginx and store access logs, including information about incoming HTTP requests and response codes.
/var/log/mysql/error.log or /var/log/postgresql/postgresql-<version>-main.log: These files contain database-specific logs for MySQL or PostgreSQL, respectively. They can provide insights into database activities, errors, queries, and performance-related events.
/var/log/daemon.log or /var/log/systemd.log: These files capture logs from system daemons and services.
Syslog messages are handled by two services:
- The systemd-journald daemon
- The rsyslog service
Sub-directories storing syslog messages
The following sub-directories under the /var/log directory store syslog messages.
/var/log/messages - all syslog messages except the following
/var/log/secure - security and authentication-related messages and errors
/var/log/maillog - mail server-related messages and errors
/var/log/cron - log files related to periodically executed tasks
/var/log/boot.log - log files related to system startup
Top Log Files to Monitor in Linux
In Linux, there are several important files and directories that are commonly monitored for various purposes, including system monitoring, security monitoring, and troubleshooting.
Here are some key files and directories that are often monitored:
| S.No. | Files | Purpose |
|---|---|---|
| 1 | /var/log/syslog | General system-wide event logs |
| 2 | /var/log/kern.log | Kernel-specific messages |
| 3 | /var/log/boot.log | Logs related to system boot processes |
| 4 | /var/log/auth.log | Records authentication-related events, such as user logins and system authentication attempts |
| 5 | /var/log/dpkg.log | Logs package installation, removal, and modification actions performed with the APT package manager |
| 6 | /var/log/dmesg | Kernel ring buffer logs that contain information about hardware and device drivers |
| 7 | /var/log/iptables.log | Logs generated by the iptables firewall tool |
| 8 | /var/log/apache2/access.log | Records HTTP requests made to the Apache web server |
| 9 | /var/log/apache2/error.log | Contains error messages and warnings from the Apache web server |
| 10 | /var/log/mysql/error.log | Logs MySQL database server errors and warnings |
| 11 | /var/log/postgresql/postgresql-X.X-main.log | PostgreSQL database server logs (X.X represents the version number) |
| 12 | /var/log/mail.log | Logs mail server activity, including sending, receiving, and delivery of emails |
| 13 | /var/log/auth.log | Records authentication-related events, including failed login attempts |
| 14 | /var/log/secure | Security-related events and authentication logs on some Linux distributions (e.g., CentOS/RHEL) |
| 15 | /var/log/cron.log | Logs cron job execution and related information |
| 16 | /var/log/daemon.log | Records events and errors related to system daemons |
These are just some examples of files and directories that are commonly monitored. The specific files to monitor may vary depending on the Linux distribution, the installed software components, and the monitoring requirements of the system.
YUM
To search for a package, use:
# yum search <package name>
To include term matches within package descriptions, use:
# yum search --all <package name>
Use the following procedure to list installed and available packages.
To list information about all installed and available packages, use:
# yum list --all
To list all packages installed on your system, use:
# yum list --installed
To list all packages in all enabled repositories that are available to install, use:
# yum list --available
Use the following procedure to list enabled and disabled repositories.
To list all enabled repositories on your system, use:
# yum repolist
To list all disabled repositories on your system, use:
# yum repolist --disabled
To list both enabled and disabled repositories, use:
# yum repolist --all
To list additional information about the repositories, use:
# yum repoinfo
To ensure global expressions are passed to yum as intended, use one of the following methods:
Double-quote or single-quote the entire global expression.
# yum provides "*/file-name"
Replace file-name with the name of the file.
Escape the wildcard characters by preceding them with a backslash (
\) character.
Replace file-name with the name of the file.
Installing software packages
The following section describes how to use yum to:
- Install packages.
- Install a package group.
- Specify a package name in yum input.
7.4.1. Installing packages with YUM
To install a package and all the package dependencies, use:
# yum install package-nameReplace package-name with the name of the package.
To install multiple packages and their dependencies simultaneously, use:
# yum install package-name-1 package-name-2Replace package-name-1 and package-name-2 with the names of the packages.
When installing packages on a multilib system (AMD64, Intel 64 machine), you can specify the architecture of the package by appending it to the package name:
# yum install package-name.archReplace package-name.arch with the name and architecture of the package.
If you know the name of the binary you want to install, but not the package name, you can use the path to the binary as an argument:
# yum install /usr/sbin/binary-fileReplace
/usr/sbin/binary-filewith a path to the binary file.yum searches through the package lists, finds the package which provides
/usr/sbin/binary-file, and prompts you as to whether you want to install it.To install a previously-downloaded package from a local directory, use:
# yum install /path/Replace /path/ with the path to the package.
Note that you can optimize the package search by explicitly defining how to parse the argument. See Section 7.4.3, “Specifying a package name in YUM input” for more details.
Installing a package group with YUM
The following procedure describes how to install a package group by a group name or by a groupID using yum.
To install a package group by a group name, use:
# yum group install group-nameOr
# yum install @group-nameReplace group-name with the full name of the group or environmental group.
To install a package group by the groupID, use:
# yum group install groupIDReplace groupID with the ID of the group.
# yum check-update
# yum update package-name
# yum group update group-name
# yum update
Enabling DNF Automatic
To run DNF Automatic, you always need to enable and start a specific systemd timer unit. You can use one of the timer units provided in the dnf-automatic package, or you can write your own timer unit depending on your needs.
The following section describes how to enable DNF Automatic.
Prerequisites
- You specified the behavior of DNF Automatic by modifying the
/etc/dnf/automatic.confconfiguration file.
For more information about DNF Automatic configuration file, see Section 2.5.6.2, “DNF Automatic configuration file”.
Procedure
Select, enable and start a systemd timer unit that fits your needs:
# systemctl enable --now <unit>
where
<unit>is one of the following timers:dnf-automatic-download.timerdnf-automatic-install.timerdnf-automatic-notifyonly.timerdnf-automatic.timer
For downloading available updates, use:
# systemctl enable dnf-automatic-download.timer
# systemctl start dnf-automatic-download.timer
For downloading and installing available updates, use:
# systemctl enable dnf-automatic-install.timer
# systemctl start dnf-automatic-install.timer
For reporting about available updates, use:
# systemctl enable dnf-automatic-notifyonly.timer
# systemctl start dnf-automatic-notifyonly.timer
Optionally, you can use:
# systemctl enable dnf-automatic.timer
# systemctl start dnf-automatic.timer
In terms of downloading and applying updates, this timer unit behaves according to settings in the /etc/dnf/automatic.conf configuration file. The default behavior is similar to dnf-automatic-download.timer: it downloads the updated packages, but it does not install them.
How to recover redhat kvm vms root password
# yum -y install libguestfs-tools
root@box1 # guestfish --rw -a ./rhel-guest-image-7.1-20150224.0.x86_64.qcow2
><fs> run
><fs> list-filesystems
><fs> mount /dev/sda1 /
><fs> vi /etc/shadow
Replace the root password hash in /etc/shadow with one generated on another system:
[root@someothersystem ~]# openssl passwd -1 changeme
$1$QiSwNHrs$uID6S6qOifSNZKzfXsmQG1
Create a local repo with Red Hat Enterprise Linux 8/9- Only a RHEL 8 system, Red Hat Satellite, or a Capsule can sync RHEL 8 content correctly.
- On RHEL8, ensure you have
yum-utils-4.0.8-3.el8.noarch or higher installed so reposync correctly downloads all the packages.
Sync all enabled repositories and their repodata
# reposync -p <download-path> --download-metadata --repo=<repo id>
For example:
# reposync --repoid=rhel-8-for-x86_64-highavailability-rpms --download-path=/ha_soft -n --downloadcomps --download-metadata
To sync a specific minor release
For systems registered to the CDN or Red Hat Satellite you must release lock the system with subscription-manager
# subscription-manager release --set=8.4 && rm -rf /var/cache/dnf
At this point your system will only have access to content released for RHEL 8.0-8.4. If you are syncing multiple minor releases, you must keep these separate from each other. For example, to sync both 8.4 and 8.5:
# subscription-manager release --set=8.4 && rm -rf /var/cache/dnf
# reposync -p /var/www/html/8.4 --download-metadata --repo=<repo id>
# subscription-manager release --set=8.5 && rm -rf /var/cache/dnf
# reposync -p /var/www/html/8.5 --download-metadata --repo=<repo id>
To sync only the latest content for a specific minor release, you must set the subscription-manager version lock. Then run reposync with the -n option to specify that you only wish to download the latest content (and not content for older minor release versions as well):
# subscription-manager release --set=8.4 && rm -rf /var/cache/dnf
# reposync -n -p /var/www/html/8.4 --download-metadata --repo=<repo id>
# subscription-manager repos --list
# subscription-manager list --available
# subscription-manager list --consumed
# subscription-manager attach --pool 8a85f9a17f69ca57017faa742b2662ad
# subscription-manager refresh
# subscription-manager repos --enable rhel-8-for-x86_64-highavailability-rpms
Boot From SAN on RedHat with PowerPath and EMC Clarion
Boot From SAN with LVM and Multipath
SEE http://www.thogan.com/site/index.php?option=com_content&view=article&id=5:ubuntu-multipath-boot-from-san-experiment&catid=2:uncatagorized&Itemid=2 for information on our experience with Ubuntu :)
Before getting started, you will need to make sure that you have to proper installation materials, and that the SAN configuration is appropriately setup for a system install.
Install Media
RHEL 4, Update 6 (RHEL 4.6) or RHEL 5. Earlier versions of RedHat, including earlier update versions, have an improperly functioning QLogic driver. Use this specific installation media for this document. Also, depending on the version of the QLogic driver, the SAN devices may be laid out before or after the local storage, use fdisk and look at the volume sizes to identify the local storage and remember which it is.
SAN Configuration
One path to the SAN. There cannot be multiple paths to the SAN during an install as it will cause problems with mounting /boot and finding the LVM partitions. The system must be booted in order to correct the configuration, so you must perform the install with only one path configured. Once the system boots, the appropriate adjustments can be made to fstab and the LVM to allow the system to boot properly with multiple paths.
Location of SAN Boot Card
You must know in which PCI slot the HBA that you will be booting off resides. You will need to configure the BIOS to boot from here. You must also make sure that this is the card with the active path, and you will need to configure that specific card to have boot enabled.
Three things that need to line up:
BIOS boot device = HBA w/active path = HBA configured to boot
BIOS Configuration
This section is written based on an installation on IBM x86 hardware. If you are using another platform these menus may be different.
Setting The Boot Device
Boot the system and enter the system BIOS. You will need to make sure that the SAN card is a valid boot device.
Select “Start Options”
Go To “PCI Device Boot Priority”
Modify this field to reflect the PCI slot number in which the boot HBA resides.
Go To “Startup Sequence Options”
Under “Primary Startup Sequence”, set the four devices as follows:[1]
“CD ROM”
“Hard Disk 0”
“Hard Disk 1”
“Network”
Escape back to the main menu.
Select “Save Settings” then “Exit Setup”
Configuring the HBA
The HBA will now need to be configured to be bootable. On the next boot, enter the HBA BIOS. This document was written against QLogic 2460 HBAs. If you are using a different HBA, the process may vary.
Enter the BIOS with a <CTRL-Q> when prompted.
Select the adapter with the active path (also should be the slot configured for boot in the BIOS)
Select “Configuration Settings”
Select “Adapter Settings”
Set “Host Adapter BIOS” to “Enabled”
Return to the previous menu.
Select “Selectable Boot Settings”
Set “Selectable Boot” to “Enabled”
Set each boot device by selecting the field, pressing Enter, then selecting a LUN.
Escape back to the main menu, and select “Save Changes” when prompted.
Select “Select Host Adapter”
Select the other adapter this time (the NON boot one)
Repeat the process as with the first adapter, EXCEPT:
Disable "Host Adapter BIOS"
Disable “Selectable Boot”
Escape to the main menu and save changes again.
Exit the utility and reboot the system.
Starting the Linux Install
Have the appropriate RedHat media in the optical drive and boot the system. Boot to the default graphical install. Watch when the “Loading SCSI Drivers” screen appears, you should see the module for the HBAs get loaded. For the QLogic cards, this is qla2xxx or qla2400.
Once the graphical installer is fully started and prompting you to click next to begin, switch to the terminal by pressing “CTRL-ALT-F2”.
At the console, enter "ls /dev/sd*". You should see at least /dev/sda and /dev/sdb. There may be more. Identify the SAN and local devices. The local device will usually be /dev/sda. You can test this by entering "fdisk /dev/sda", then at the menu enter "p" to print the partition table. It will also tell you the size of the volume. Look for a size that indicates a SAN LUN or local storage and remember which devices are which.
Addendum to Standard Linux Build – Partitioning
The name of the volume group created on the SAN device should be "sanvg". The /boot partition should be created on the SAN device as well.
Continue with the install from this point as described in “Standard Linux Build”.
First Boot After Install
The first boot of the system after installation will likely FAIL. This is normal, as the installer did not choose the appropriate boot device when installing GRUB. To boot the system you will need to modify the GRUB commands.
After you are informed of the failed boot, hit enter to get the GRUB menu.
OH NO! GRUB comes up and the screen is all wiggedy wack! Read Appendix A at the end of the document for help!
With the first boot option selected, press “e” for edit.
The first line in the next menu should be something like “root (hd1,0)”.
Press “e” to edit this line.
Change the line to read “root (hd0,0)”
Hit enter to accept your changes
Press “b” to boot the system with the modified commands.
Later in this document we will edit grub.conf to permanently make this modification.
If you see GRUB in upper left of screen after reboot:
Grub may fail to install to the correct path, so it may be necessary to boot up from the DVD/CD in rescue mode using linux rescue at the prompt and then perform a grub install as follows:
chroot /mnt/sysimage
grub-install /dev/sdb
Install EMC PowerPath
The PowerPath software will perform failover functions as well as create special /dev devices allowing unambiguous access to the active path.
Fetch the install archive EMCpower.LINUX-5.1.2.00.00-021.tar.gz and extract it. Then use rpm to install the appropriate package onto the system:
Verify EMC PowerPath Install
PowerPath should now be installed. To verify, type “lsmod | grep emc” You should see a lot of modules with names beginning with emc. This indicates that PowerPath has loaded successfully.
Start PowerPath with its init script. Afterward you should see it coalesce the available paths to the SAN into a new virtual device. Verify that this is your SAN device by reading the partition table with fdisk.
[root@ ~]# service PowerPath start
Starting PowerPath: done
[root@ ~]# ls /dev/emcpower*
/dev/emcpower /dev/emcpowera /dev/emcpowera1 /dev/emcpowera2
As you can see above, there are now devices for /dev/emcpowera, a block device representing the SAN which is backed by /dev/sdb - /dev/sde.
[root@ ~]# fdisk /dev/emcpowera
The number of cylinders for this disk is set to 9137.
There is nothing wrong with that, but this is larger than 1024,
and could in certain setups cause problems with:
1) software that runs at boot time (e.g., old versions of LILO)
2) booting and partitioning software from other OSs
(e.g., DOS FDISK, OS/2 FDISK)
Command (m for help): p
Disk /dev/emcpowera: 75.1 GB, 75161927680 bytes
255 heads, 63 sectors/track, 9137 cylinders
Units = cylinders of 16065 * 512 = 8225280 bytes
Device Boot Start End Blocks Id System
/dev/emcpowera1 * 1 19 152586 83 Linux
/dev/emcpowera2 20 9137 73240335 8e Linux LVM
Command (m for help): q
A quick run of fdisk above shows that this is definitely our SAN volume. The boot partition /dev/sdb1 is now available as /dev/emcpowera1.
Modify modprobe.conf
At the end of /etc/modprobe.conf add the following line:
options scsi_mod max_scsi_luns=256
Modify grub.conf
Open the file and make the following edits:
Change any occurrence of “(hd*,0)” to “(hd0,0)”. (Where * is any number that is not 0).
On any line that starts with kernel, remove “rhgb quiet” from the end of it.
Comment out the “hiddenmenu” option with a “#” at the start of the line.
When you are finished, the file should look something like this:
# grub.conf generated by anaconda
#
# Note that you do not have to rerun grub after making changes to this file
# NOTICE: You have a /boot partition. This means that
# all kernel and initrd paths are relative to /boot/, eg.
# root (hd1,0)
# kernel /vmlinuz-version ro root=/dev/sanvg/rootlv
# initrd /initrd-version.img
#boot=/dev/sda
default=0
timeout=5
splashimage=(hd0,0)/grub/splash.xpm.gz
#hiddenmenu
title Red Hat Enterprise Linux AS (2.6.9-67.ELsmp)
root (hd0,0)
kernel /vmlinuz-2.6.9-67.ELsmp ro root=/dev/sanvg/rootlv
initrd /initrd-2.6.9-67.ELsmp.img
title Red Hat Enterprise Linux AS-up (2.6.9-67.EL)
root (hd0,0)
kernel /vmlinuz-2.6.9-67.EL ro root=/dev/sanvg/rootlv
initrd /initrd-2.6.9-67.EL.img
Modify the LVM Config
Finally, you must modify the LVM config file in /etc/lvm/lvm.conf to ignore the raw paths to the SAN and only use the PowerPath devices.
Find the line that sets up the default filter:
filter = [ "a/.*/" ]
Comment it out with a “#” at the start of the line, then put in the following line to tell LVM to only look at the emcpower devices and local storage:
filter = [ "a/sda/", "a/emcpower/", "r/.*/" ]
This is assuming that /dev/sda is local storage, you may have to modify this line if another device is local storage.
To make sure that the filter is working, run “vgscan” and verify that there are no messages about a “Duplicate PV”.
[root@mnsvliapp003 ~]# vgscan
Reading all physical volumes. This may take a while...
Found volume group "sanvg" using metadata type lvm2
Setting Failover Policy
The appropriate failover policy will need to be set depending on the type of SAN. Up to this point, only one path to each service processor should show as “active”, the rest show a state of “unlic”. Running “powermt display dev=all” will show this information:
If the PowerPath license has not been installed do so with:
emcpreg --install
[root@~]# powermt display dev=all
Pseudo name=emcpowera
CLARiiON ID=APM00064800054 [prod_jboss1]
Logical device ID=60060160A9D01A00A2AD9882F5ACDC11 [prod_jboss1_lun20]
state=alive; policy=BasicFailover; priority=0; queued-IOs=0
Owner: default=SP A, current=SP A
==============================================================================
---------------- Host --------------- - Stor - -- I/O Path - -- Stats ---
### HW Path I/O Paths Interf. Mode State Q-IOs Errors
==============================================================================
1 qla2xxx sdb SP A4 active alive 0 0
1 qla2xxx sdc SP B5 active alive 0 0
2 qla2xxx sdd SP A5 unlic alive 0 0
2 qla2xxx sde SP B4 unlic alive 0 0
For a CLARiiON array, issue the following command to set the failover policy to “CLARiiON Optimal”. this will cause all other paths to become active. You will then need to save the configuration, and it will then persist across reboots.
[root@ ~]# powermt set policy=co
[root@ ~]# powermt display dev=all
Pseudo name=emcpowera
CLARiiON ID=APM00064403323 [dr_epicdb]
Logical device ID=600601602E811900C8E4B43C79AADC11 [dr_epicdb_LUN_100]
state=alive; policy=CLAROpt; priority=0; queued-IOs=0
Owner: default=SP A, current=SP A
==============================================================================
---------------- Host --------------- - Stor - -- I/O Path - -- Stats ---
### HW Path I/O Paths Interf. Mode State Q-IOs Errors
==============================================================================
1 qla2xxx sdb SP B4 active alive 0 0
1 qla2xxx sdc SP A5 active alive 0 0
2 qla2xxx sdd SP B4 active alive 0 0
2 qla2xxx sde SP A5 active alive 0 0
Error displaying HBAs and associated devices.
[root@ ~]# powermt save
CABLE PULL TEST
At this point in the document, the configuration should be correct to survive a cable pull test. If the system cannot recover from the I/O errors after a cable pull at this point, something is wrong with the configuration. Review all steps and ensure that the output from the diagnostic commands is consistent with what is documented here.
Finishing Up
The system should be configured to boot and handle multiple paths now. Have the extra paths configured on the SAN then reboot the system.
During the system startup, PowerPath may report failure to start. This is fine, all that failed was the module load, which is because the modules were already loaded in the initrd.
Checking the PowerPath Configuration
PowerPath should now see all the active paths to the storage. To verify this, run the command “powermt display dev=all”. This should return the expected number of paths and show what raw devices are backing each path.
[root@ ~]# powermt display dev=all
Pseudo name=emcpowera
CLARiiON ID=APM00064403323 [dr_epicdb]
Logical device ID=600601602E811900C8E4B43C79AADC11 [dr_epicdb_LUN_100]
state=alive; policy=CLAROpt; priority=0; queued-IOs=0
Owner: default=SP A, current=SP A
==============================================================================
---------------- Host --------------- - Stor - -- I/O Path - -- Stats ---
### HW Path I/O Paths Interf. Mode State Q-IOs Errors
==============================================================================
1 qla2xxx sdb SP B4 active alive 0 0
1 qla2xxx sdc SP A5 active alive 0 0
2 qla2xxx sdd SP B4 active alive 0 0
2 qla2xxx sde SP A5 active alive 0 0
Error displaying HBAs and associated devices.
[1] Many BIOSes have an option for “PCI” or “Additional Boot Devices”, or even names the HBA. If this is the case on the target system, use that selection instead of “Hard Disk”. On the IBM hardware the PCI boot device magically becomes Hard Disk 0 or 1 in the boot order, so make sure they are both in there. Boot from SAN may fail if there are bootable partitions on ANY local storage device.
To upgrade the Kernel:
Move /etc/init.d/PowerPath to /root.
Comment out references to PowerPath pseudo (emcpower?) devices from system configuration files such as /etc/fstab and /etc/lvm/lvm.conf.
Reboot the machine.
Stop the Navisphere agent (CLARiiON only)
# /etc/init.d/naviagent stop
Stop the ECC Master Agent (Symmetrix only)
# /etc/init.d/eccmad stop
Kill any remaining "mlragent" processes.
Uninstall the EMCpower.LINUX rpm package
# rpm -e EMCpower.LINUX
Upgrade the kernel.
Reboot the machine.
Stop the Navisphere agent (CLARiiON only)
# /etc/init.d/naviagent stop
Stop the ECC Master Agent (Symmetrix only)
# /etc/init.d/eccmad stop
Kill any remaining "mlragent" processes.
Uncomment references to PowerPath pseudo devices from system configuration files such as /etc/fstab and /etc/lvm/lvm.conf.
Reboot the machine.
LVM Cheat Sheet
Example:
Add a new disk with VMware or physical
Resize a VMWare PV
vmkfstools -X 125G /vmfs/volumes/LUNNAME/servername/servername.vmdk
pvchange -x n /dev/sdb
pvresize /dev/sdb
pvchange -x y /dev/sdb
echo "scsi scan-new-devices" > /proc/scsi/scsi to recognize the new disk or reboot the server
For single devices use:
echo "scsi add-single-device 0 0 1 0" > /proc/scsi/scsi
echo "scsi add-single-device 0 0 2 0" > /proc/scsi/scsi
....
echo "scsi add-single-device 0 0 4 0" > /proc/scsi/scsi
echo "scsi add-single-device 0 0 5 0" > /proc/scsi/scsi
echo "scsi add-single-device 0 0 6 0" > /proc/scsi/scsi
After creating a new 50 GB partition (/dev/sdb1) with fdisk run the below commands:
# partprobe
# pvcreate /dev/sdb1
# vgcreate vg1 /dev/sdb1
# lvcreate -L 49G -n lv1 vg1
lvcreate -l 100%FREE -n lv1 vg1
# mke2fs -j /dev/vg1/lv1
To remove:
pvremove -ff /dev/sdaX
Add disk example:
fdisk /dev/sdc and /dev/sdd
end up with /dev/sdc1 and /dev/sdd1
partprobe
pvcreate /dev/sdc1
pvcreate /dev/sdd1
vgextend vg1 /dev/sdc1 /dev/sdd1
lvextend -L +98G /dev/vg1/lv1
ext2online /dev/vg1/lv1
or unmounted disk method:
umount /usr/local/cvsroot
e2fsck -f /dev/vg1/lv1
resize2fs /dev/vg1/lv1
reboot or remount the drive manually
Combining Disks:
To create new physical volume
pvcreate /dev/sda
Create a volume group from physical disks sda and sdb
vgcreate StorageVG /dev/sda /dev/sdb
Extend an existing volume group with additional physical disks
vgextend StorageVG /dev/sda /dev/sdb
Create or mod a logical volume
lvcreate -n MyStorageLV --size 10000 StorageVG
creates a 10GB logical volume called MyStorageLV on the Volume Group StorageVG
lvcreate -l 100%FREE -n MyStorageLV StorageVG
Create an ext3 filesystem on the new LV
mke2fs -j /dev/StorageVG/MyStorageLV
to make swap use mkswap instead
create a mount point i.e. mkdir /mnt/mystorage
then mount with:
mount -t ext3 /dev/StorageVG/MyStorageLV /mnt/mystorage
add the new mount to /etc/fstab by adding line:
/dev/StorageVG/MyStorageLV /mnt/mystorage
To extend the logical volume, expand VG if required and then use:
lvextend --size <size> /dev/StorageVG/MyStorageLV
Finally expand the filesystem to utilize new disk space with:
resize2fs /dev/StorageVG/MyStorageLV
Real example of extending /var online
added disk with VMWare edit settings where disk 0 and 1 already exist
echo "scsi add-single-device 0 0 2 0" > /proc/scsi/scsi
fdisk /dev/sdc choose n p 1 then defaults
partprobe
pvcreate /dev/sdc1
vgextend LogVolGrp00 /dev/sdc1
lvextend -L +7936M /dev/LogVolGrp00/LvVar
ext2online /dev/LogVolGrp00/LvVar
Multipath device using LVM example
Move /etc/multipath.conf to /tmp: if you're lucky like me your devices are already supported and this will unblacklist your new entries, otherwise you will need to go and see my other multipath doc, vendor docs for your devices, and redhat docs to get everything working
Edit /etc/lvm/lvm.conf and replace filter with:
filter = [ "a/dev/mapper/.*/", "r/dev/sd.*/", "r/dev/cciss/.*/" ]
# Will prevent lvm from spitting out a bunch of errors about the underlying devices that we don't need anyhow
Do following to add all four paths for Lun1 to the server:
echo "scsi add-single-device 0 0 0 1" > /proc/scsi/scsi
echo "scsi add-single-device 1 0 0 1" > /proc/scsi/scsi
echo "scsi add-single-device 0 0 1 1" > /proc/scsi/scsi
echo "scsi add-single-device 1 0 1 1" > /proc/scsi/scsi
multipath -ll # to get the device id for following commands (multipath.conf must be gone or configured correctly for this to work)
pvcreate /dev/mapper/3600601601781190068d807e1f3cade11 # create physical volume for use with lvm
vgcreate appsvg /dev/mapper/3600601601781190068d807e1f3cade11 # create volume group
lvcreate -L +19G -n appslv appsvg # Create 19G logical volume
lvcreate -l 100%FREE -n lv1 vg1
mke2fs -j /dev/mapper/appsvg-appslv # formats the filesystem as ext3
e2label /dev/mapper/appsvg-appslv apps
#Now we can use the apps label to mount this guy
edit rc.sysinit with:
echo "TIME TO PUT SOME MPATH LINKING HERE!"
# Build "/dev/mapper/<dev> <label>" pairs for every labeled multipath device,
# then create stable /dev/mpath_links/<label> symlinks so fstab can mount by
# label regardless of which /dev/mapper name the device gets at boot.
#
# IFS is narrowed to newline+backspace so each "device label" pair returned by
# the pipeline below stays a single word in the for-loop; the original IFS is
# restored afterward.
ORIGINAL_IFS=$IFS
IFS=$'\n\b'
# grep mapper keeps only real device lines; grep -v Bad drops e2label errors
# for devices without an ext2/3 label.
MPATH_LABELS=$(for d in /dev/mapper/* ; do echo -n "$d " ; e2label "$d" 2>&1 ; done | grep mapper | grep -v Bad)
mkdir /dev/mpath_links 2>/dev/null
rm -f /dev/mpath_links/*
for mpath in $MPATH_LABELS; do
  mpath_device=$(echo "$mpath" | awk '{print $1}')
  mpath_label=$(echo "$mpath" | awk '{print $2}')
  mpath_link="/dev/mpath_links/$mpath_label"
  echo "Linking $mpath_device -> $mpath_link"
  ln -s "$mpath_device" "$mpath_link"
done
IFS=$ORIGINAL_IFS
echo "DONE WITH CUSTOM MPATH LINKING"
immediately after:
if strstr "$cmdline" noreadonlyroot ; then
READONLY=no
fi
and edit fstab with:
/dev/mpath_links/apps /apps ext3 defaults 0 0
Script to create VMSS and LB in Azure
#!/bin/bash
az network vnet create -g MyRG -n VNET1
az network vnet subnet create -g MyRG --vnet VNET1 -n S1
# Public IP + load balancer with one frontend (LBFE) and one backend pool (LBBE).
az network public-ip create -g MyRG -n IP1 --allocation-method static --dns-name MyDNS
az network lb create -g MyRG -n LB1 --frontend-ip-name LBFE --backend-pool-name LBBE
# One inbound NAT rule per VM (an inbound NAT rule can be attached to only one
# NIC): frontend 21 -> VM1:22, frontend 23 -> VM2:22.
# NOTE: the flag is --protocol (the original "--protocal" is rejected by az).
az network lb inbound-nat-rule create -g MyRG -n SSH1 --lb-name LB1 --backend-port 22 \
    --frontend-port 21 --frontend-ip-name LBFE --protocol tcp
az network lb inbound-nat-rule create -g MyRG -n SSH2 --lb-name LB1 --backend-port 22 \
    --frontend-port 23 --frontend-ip-name LBFE --protocol tcp
# Health probe plus two load-balancing rules on the LB created above (LB1,
# not "MyLB" as the original had).
az network lb probe create -g MyRG -n HTTPPROBE --lb-name LB1 --port 80 --protocol tcp
az network lb rule create -g MyRG -n HTTP1 --lb-name LB1 --probe-name HTTPPROBE --protocol tcp \
    --frontend-ip-name LBFE --frontend-port 80 --backend-pool-name LBBE --backend-port 80
az network lb rule create -g MyRG -n HTTP2 --lb-name LB1 --probe-name HTTPPROBE --protocol tcp \
    --frontend-ip-name LBFE --frontend-port 1234 --backend-pool-name LBBE --backend-port 8000
az network lb show -g MyRG -n LB1
# NIC1 referenced by full resource IDs; NIC2 by bare names (requires the CLI to
# resolve them against the LB — presumably via --lb-name; verify with your CLI
# version). NIC2 must use SSH2: SSH1 is already bound to NIC1.
az network nic create -g MyRG -n NIC1 --subnet-name S1 --vnet-name VNET1 --lb-address-pool-ids '/subscriptions/[...]/resourceGroups/MyRG/providers/Microsoft.Network/loadBalancers/LB1/backendAddressPools/LBBE' --lb-nat-rule-ids '/subscriptions/[...]/resourceGroups/MyRG/providers/Microsoft.Network/loadBalancers/LB1/inboundNatRules/SSH1'
az network nic create -g MyRG -n NIC2 --subnet-name S1 --vnet-name VNET1 --lb-address-pool-ids 'LBBE' --lb-nat-rule-ids 'SSH2'
# Two VMs in one availability set, each attached to its own NIC.
# (az vm create also needs --image; add one, e.g. --image Ubuntu2204.)
az vm availability-set create -g MyRG -n AS1
az vm create -g MyRG -n VM1 --availability-set AS1 --vnet-name VNET1 --subnet S1 --nics NIC1
az vm create -g MyRG -n VM2 --availability-set AS1 --vnet-name VNET1 --subnet S1 --nics NIC2









Comments
Post a Comment