Kubernetes on CentOS 7.3

Last login: Thu Mar 1 00:45:37 2018 from 192.168.1.248
[root@clusterserver1 ~]# uname -a
Linux clusterserver1.rmohan.com 3.10.0-693.17.1.el7.x86_64 #1 SMP Thu Jan 25 20:13:58 UTC 2018 x86_64 x86_64 x86_64 GNU/Linux
[root@clusterserver1 ~]#

Master : 192.168.1.20
Worker1 : 192.168.1.21
Worker2 : 192.168.1.22
Worker3 : 192.168.1.23

hostnamectl set-hostname 'clusterserver1.rmohan.com'
exec bash
setenforce 0
sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux

[root@clusterserver1 ~]# cat /etc/redhat-release
CentOS Linux release 7.4.1708 (Core)
[root@clusterserver1 ~]#

[root@clusterserver1 ~]# cat /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.1.20 clusterserver1.rmohan.com clusterserver1 master
192.168.1.21 clusterserver2.rmohan.com clusterserver2 worker1
192.168.1.22 clusterserver3.rmohan.com clusterserver3 worker2
192.168.1.23 clusterserver4.rmohan.com clusterserver4 worker3
[root@clusterserver1 ~]#

[root@clusterserver1 ~]# systemctl stop firewalld && systemctl disable firewalld

[root@clusterserver1 ~]# yum update -y
[root@clusterserver1 ~]# modprobe br_netfilter
[root@clusterserver1 ~]# sysctl net.bridge.bridge-nf-call-iptables=1
[root@clusterserver1 ~]# sysctl net.bridge.bridge-nf-call-ip6tables=1

[root@clusterserver1 ~]# cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
[root@clusterserver1 ~]# sysctl --system

cat <<EOF > /etc/yum.repos.d/virt7-dockerrelease.repo
[virt7-docker-common-release]
name=virt7-docker-common-release
baseurl=http://cbs.centos.org/repos/virt7-docker-common-release/x86_64/os/
gpgcheck=0
EOF

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
       https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

[root@clusterserver1 ~]# yum install ebtables ethtool

[root@clusterserver1 ~]# yum install -y kubelet kubeadm kubectl docker

#systemctl enable kubelet && systemctl start kubelet

After this, check that the kubelet is running:

#systemctl status kubelet -l

[root@clusterserver1 ~]# systemctl status kubelet -l
● kubelet.service - kubelet: The Kubernetes Node Agent
Loaded: loaded (/etc/systemd/system/kubelet.service; enabled; vendor preset: disabled)
Drop-In: /etc/systemd/system/kubelet.service.d
└─10-kubeadm.conf
Active: activating (auto-restart) (Result: exit-code) since Thu 2018-03-01 16:16:40 +08; 8s ago
Docs: http://kubernetes.io/docs/
Process: 2504 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_CADVISOR_ARGS $KUBELET_CGROUP_ARGS $KUBELET_CERTIFICATE_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=1/FAILURE)
Main PID: 2504 (code=exited, status=1/FAILURE)

Mar 01 16:16:40 clusterserver1.rmohan.com systemd[1]: kubelet.service: main process exited, code=exited, status=1/FAILURE
Mar 01 16:16:40 clusterserver1.rmohan.com systemd[1]: Unit kubelet.service entered failed state.
Mar 01 16:16:40 clusterserver1.rmohan.com systemd[1]: kubelet.service failed.
[root@clusterserver1 ~]# systemctl status kubelet -l
● kubelet.service - kubelet: The Kubernetes Node Agent
Loaded: loaded (/etc/systemd/system/kubelet.service; enabled; vendor preset: disabled)
Drop-In: /etc/systemd/system/kubelet.service.d
└─10-kubeadm.conf
Active: activating (auto-restart) (Result: exit-code) since Thu 2018-03-01 16:17:01 +08; 1s ago
Docs: http://kubernetes.io/docs/
Process: 2517 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_CADVISOR_ARGS $KUBELET_CGROUP_ARGS $KUBELET_CERTIFICATE_ARGS $KUBELET_EXTRA_ARGS (code=exited, status=1/FAILURE)
Main PID: 2517 (code=exited, status=1/FAILURE)

Errors can be verified in /var/log/messages:
[root@clusterserver1 ~]# tail -f /var/log/messages
Mar 1 16:17:11 clusterserver1 systemd: kubelet.service holdoff time over, scheduling restart.
Mar 1 16:17:11 clusterserver1 systemd: Started kubelet: The Kubernetes Node Agent.
Mar 1 16:17:11 clusterserver1 systemd: Starting kubelet: The Kubernetes Node Agent…
Mar 1 16:17:11 clusterserver1 kubelet: I0301 16:17:11.580912 2523 feature_gate.go:226] feature gates: &{{} map[]}
Mar 1 16:17:11 clusterserver1 kubelet: I0301 16:17:11.581022 2523 controller.go:114] kubelet config controller: starting controller
Mar 1 16:17:11 clusterserver1 kubelet: I0301 16:17:11.581030 2523 controller.go:118] kubelet config controller: validating combination of defaults and flags
Mar 1 16:17:11 clusterserver1 kubelet: error: unable to load client CA file /etc/kubernetes/pki/ca.crt: open /etc/kubernetes/pki/ca.crt: no such file or directory
Mar 1 16:17:11 clusterserver1 systemd: kubelet.service: main process exited, code=exited, status=1/FAILURE
Mar 1 16:17:11 clusterserver1 systemd: Unit kubelet.service entered failed state.
Mar 1 16:17:11 clusterserver1 systemd: kubelet.service failed.
Mar 1 16:17:21 clusterserver1 systemd: kubelet.service holdoff time over, scheduling restart.
Mar 1 16:17:21 clusterserver1 systemd: Started kubelet: The Kubernetes Node Agent.
Mar 1 16:17:21 clusterserver1 systemd: Starting kubelet: The Kubernetes Node Agent…
Mar 1 16:17:21 clusterserver1 kubelet: I0301 16:17:21.830092 2535 feature_gate.go:226] feature gates: &{{} map[]}
Mar 1 16:17:21 clusterserver1 kubelet: I0301 16:17:21.830201 2535 controller.go:114] kubelet config controller: starting controller
Mar 1 16:17:21 clusterserver1 kubelet: I0301 16:17:21.830209 2535 controller.go:118] kubelet config controller: validating combination of defaults and flags
Mar 1 16:17:21 clusterserver1 kubelet: error: unable to load client CA file /etc/kubernetes/pki/ca.crt: open /etc/kubernetes/pki/ca.crt: no such file or directory
Mar 1 16:17:21 clusterserver1 systemd: kubelet.service: main process exited, code=exited, status=1/FAILURE
Mar 1 16:17:21 clusterserver1 systemd: Unit kubelet.service entered failed state.
Mar 1 16:17:21 clusterserver1 systemd: kubelet.service failed.

So the issue here is "error: unable to load client CA file /etc/kubernetes/pki/ca.crt", because kubeadm has not yet been run on this node.

If you are seeing this on a worker node and kubeadm has already been run on the master, you can copy the certificate files from the master, as sketched below.
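A minimal sketch of that copy, run on the worker, assuming root SSH access to the master and the hostnames from /etc/hosts above:

mkdir -p /etc/kubernetes/pki
scp root@master:/etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/ca.crt
systemctl restart kubelet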

If this is your master node, start the master node setup here.

If Kubernetes was ever installed or partially set up on this machine, you need to reset kubeadm first.
Run this command on the master node:

[root@clusterserver1 ~]# kubeadm reset
[preflight] Running pre-flight checks.
[reset] Stopping the kubelet service.
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Removing kubernetes-managed containers.
[reset] Docker doesn't seem to be running. Skipping the removal of running Kubernetes containers.
[reset] No etcd manifest found in "/etc/kubernetes/manifests/etcd.yaml". Assuming external etcd.
[reset] Deleting contents of stateful directories: [/var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes]
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[root@clusterserver1 ~]#

[root@clusterserver1 ~]# kubeadm init
[init] Using Kubernetes version: v1.9.3
[init] Using Authorization modes: [Node RBAC]
[preflight] Running pre-flight checks.
[preflight] The system verification failed. Printing the output from the verification:
KERNEL_VERSION: 3.10.0-693.17.1.el7.x86_64
CONFIG_NAMESPACES: enabled
CONFIG_NET_NS: enabled
CONFIG_PID_NS: enabled
CONFIG_IPC_NS: enabled
CONFIG_UTS_NS: enabled
CONFIG_CGROUPS: enabled
CONFIG_CGROUP_CPUACCT: enabled
CONFIG_CGROUP_DEVICE: enabled
CONFIG_CGROUP_FREEZER: enabled
CONFIG_CGROUP_SCHED: enabled
CONFIG_CPUSETS: enabled
CONFIG_MEMCG: enabled
CONFIG_INET: enabled
CONFIG_EXT4_FS: enabled (as module)
CONFIG_PROC_FS: enabled
CONFIG_NETFILTER_XT_TARGET_REDIRECT: enabled (as module)
CONFIG_NETFILTER_XT_MATCH_COMMENT: enabled (as module)
CONFIG_OVERLAY_FS: enabled (as module)
CONFIG_AUFS_FS: not set - Required for aufs.
CONFIG_BLK_DEV_DM: enabled (as module)
OS: Linux
CGROUPS_CPU: enabled
CGROUPS_CPUACCT: enabled
CGROUPS_CPUSET: enabled
CGROUPS_DEVICES: enabled
CGROUPS_FREEZER: enabled
CGROUPS_MEMORY: enabled
[WARNING FileExisting-crictl]: crictl not found in system path
[WARNING Service-Docker]: docker service is not enabled, please run 'systemctl enable docker.service'
[preflight] Some fatal errors occurred:
[ERROR SystemVerification]: failed to get docker info: Cannot connect to the Docker daemon at unix:///var/run/docker.sock. Is the docker daemon running?
[ERROR Swap]: running with swap on is not supported. Please disable swap
[ERROR Service-Docker]: docker service is not active, please run 'systemctl start docker.service'
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`

Disable swap:

swapoff -a

Then enable and start the Docker service, and run kubeadm init again.
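A short sketch of making both fixes stick; the fstab edit assumes a standard swap entry and comments it out so swap stays off after a reboot:

swapoff -a
# keep swap disabled across reboots (assumes a standard /etc/fstab swap line)
sed -i '/ swap / s/^/#/' /etc/fstab
systemctl enable docker && systemctl start docker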

[root@clusterserver1 ~]# kubeadm init
[init] Using Kubernetes version: v1.9.3
[init] Using Authorization modes: [Node RBAC]
[preflight] Running pre-flight checks.
[WARNING FileExisting-crictl]: crictl not found in system path
[preflight] Starting the kubelet service
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [clusterserver1.rmohan.com kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.1.20]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] Generated sa key and public key.
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "scheduler.conf"
[controlplane] Wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[controlplane] Wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[controlplane] Wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml"
[init] Waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests".
[init] This might take a minute or longer if the control plane images have to be pulled.
[apiclient] All control plane components are healthy after 85.002972 seconds
[uploadconfig] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[markmaster] Will mark node clusterserver1.rmohan.com as master by adding a label and a taint
[markmaster] Master clusterserver1.rmohan.com tainted and labelled with key/value: node-role.kubernetes.io/master=""
[bootstraptoken] Using token: 8c45ed.70033b8135e5439a
[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: kube-dns
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

kubeadm join --token 8c45ed.70033b8135e5439a 192.168.1.20:6443 --discovery-token-ca-cert-hash sha256:1df9c0250f28e5a4d137f29307b787954948fc417f2fa9a06a195d65f41b959d

[root@clusterserver1 ~]# mkdir -p $HOME/.kube
[root@clusterserver1 ~]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@clusterserver1 ~]# chown $(id -u):$(id -g) $HOME/.kube/config

[root@clusterserver1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
clusterserver1.rmohan.com NotReady master 2m v1.9.3
[root@clusterserver1 ~]#

Install the kubeadm and docker packages on all worker nodes

[root@clusterserver2 ~]# yum install kubeadm docker -y
[root@clusterserver3 ~]# yum install kubeadm docker -y
[root@clusterserver4 ~]# yum install kubeadm docker -y

Start and enable docker service

[root@clusterserver2 ~]# systemctl restart docker && systemctl enable docker
[root@clusterserver3 ~]# systemctl restart docker && systemctl enable docker
[root@clusterserver4 ~]# systemctl restart docker && systemctl enable docker

Now join the worker nodes to the master node.

To join worker nodes to the master, a token is required. When the Kubernetes master was initialized, the output included a join command with that token. Copy that command and run it on each worker node.

[root@worker-node1 ~]# kubeadm join --token 8c45ed.70033b8135e5439a 192.168.1.20:6443 --discovery-token-ca-cert-hash sha256:1df9c0250f28e5a4d137f29307b787954948fc417f2fa9a06a195d65f41b959d
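Back on the master, confirm that the workers registered; they will show NotReady until a pod network add-on is applied:

kubectl get nodes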

yum install epel-release -y

yum install ansible -y

How to Setup a WebDAV Server Using Apache on CentOS 7

WebDAV stands for “Web-based Distributed Authoring and Versioning”. It’s an extension of the HTTP protocol that allows users to manage and share files stored on a WebDAV-enabled web server.

This tutorial will show you how to setup a WebDAV server using Apache on a Vultr CentOS 7 server instance.

Prerequisites
A Vultr CentOS 7 server instance.
A non-root sudo user. You can learn more about how to create a sudo user in this Vultr tutorial.
Step one: Update the system
sudo yum install epel-release
sudo yum update -y
sudo shutdown -r now
After the reboot, use the same sudo user to log in.

Step two: Install Apache
Install Apache using YUM:

sudo yum install httpd
Disable Apache’s default welcome page:

sudo sed -i 's/^/#&/g' /etc/httpd/conf.d/welcome.conf
Prevent the Apache web server from displaying files within the web directory:

sudo sed -i "s/Options Indexes FollowSymLinks/Options FollowSymLinks/" /etc/httpd/conf/httpd.conf
Start the Apache web server:

sudo systemctl start httpd.service
sudo systemctl enable httpd.service
Step three: Setup WebDAV
For Apache, there are three WebDAV-related modules which are loaded by default when the Apache web server starts. You can confirm that with this command:

sudo httpd -M | grep dav
You should be presented with:

dav_module (shared)
dav_fs_module (shared)
dav_lock_module (shared)
Next, create a dedicated directory for WebDAV:

sudo mkdir /var/www/html/webdav
sudo chown -R apache:apache /var/www/html
sudo chmod -R 755 /var/www/html
For security purposes, you need to create a user account, say "user001", to access the WebDAV server; you will be prompted for a password. Later, you will use this account to log into your WebDAV server.

sudo htpasswd -c /etc/httpd/.htpasswd user001
Modify the owner and permissions in order to enhance security:

sudo chown root:apache /etc/httpd/.htpasswd
sudo chmod 640 /etc/httpd/.htpasswd
Step four: Create a virtual host for WebDAV
sudo vi /etc/httpd/conf.d/webdav.conf
Populate the file with:

DavLockDB /var/www/html/DavLock
<VirtualHost *:80>
ServerAdmin webmaster@localhost
DocumentRoot /var/www/html/webdav/
ErrorLog /var/log/httpd/error.log
CustomLog /var/log/httpd/access.log combined
Alias /webdav /var/www/html/webdav
<Directory /var/www/html/webdav>
DAV On
AuthType Basic
AuthName "webdav"
AuthUserFile /etc/httpd/.htpasswd
Require valid-user
</Directory>
</VirtualHost>
Save and quit:

:wq!
Restart Apache to put your changes into effect:

sudo systemctl restart httpd.service
Step five: Modify firewall rules
sudo firewall-cmd --zone=public --permanent --add-service=http
sudo firewall-cmd --reload
Step six: Test the functionality of the WebDAV server from a local machine
In order to take advantage of WebDAV, you need to use a qualified client. For example, you can install a program called cadaver on a CentOS 7 desktop:

sudo yum install cadaver
Having cadaver installed, use the following command to access the WebDAV server:

cadaver http://<your-server-ip>/webdav/
Use the username "user001" and the password you set up earlier to log in.
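If cadaver is not available, a quick sanity check can also be done with curl (a sketch; adjust the user and host). A successful login returns a multi-status XML listing of the directory:

curl -u user001 -X PROPFIND -H "Depth: 1" http://<your-server-ip>/webdav/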

In the cadaver shell, you can upload and organize files as you wish. Here are some examples.

To upload a local file “/home/user/abc.txt” to the WebDAV server:

dav:/webdav/> put /home/user/abc.txt
To create a directory “dir1” on the WebDAV server:

dav:/webdav/> mkdir dir1
To quit the cadaver shell:

dav:/webdav/> exit
If you want to learn more about cadaver, you can look up the cadaver manual in the Bash shell:

man cadaver
or

cadaver -h

NoSQL vs SQL comparison

The following table compares the main differences between NoSQL and SQL.
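                 SQL                            NoSQL
Data model       relational tables              document, key-value, column, graph
Schema           fixed, defined up front        dynamic / flexible
Scaling          vertical (bigger server)       horizontal (more servers)
Transactions     ACID                           mostly eventual consistency (BASE)
Query language   SQL                            per-database query APIs
Examples         MySQL, PostgreSQL, Oracle      MongoDB, Cassandra, Redis, Neo4j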

Installing Nagios 4.1 on CentOS 7

#### Problem Statement: Installing Nagios 4.1 on CentOS 7
## Step #1: Install LAMP
	yum install httpd -y
	systemctl start httpd
	systemctl enable httpd
	yum -y install mariadb-server mariadb
	systemctl start mariadb
	systemctl enable mariadb
	systemctl status mariadb
	mysql
	mysql_secure_installation
	yum install php php-mysql php-gd php-pear -y
## Install dependencies for Nagios
	sudo yum install -y gcc glibc glibc-common gd gd-devel make net-snmp openssl-devel xinetd unzip wget
#Create user and group 
	sudo useradd nagios
	sudo groupadd nagcmd
	sudo usermod -a -G nagcmd nagios
# download Core nagios source and build it from source
	cd ~
	curl -L -O https://assets.nagios.com/downloads/nagioscore/releases/nagios-4.1.1.tar.gz
	tar xvf nagios-*.tar.gz
	cd nagios-*
	./configure --with-command-group=nagcmd 
	make all
	sudo make install
	sudo make install-commandmode
	sudo make install-init
	sudo make install-config
	sudo make install-webconf
	sudo usermod -G nagcmd apache
# Download and install plugins from Source
	cd ~
	curl -L -O http://nagios-plugins.org/download/nagios-plugins-2.1.1.tar.gz
	tar xvf nagios-plugins-*.tar.gz
	cd nagios-plugins-*
	./configure --with-nagios-user=nagios --with-nagios-group=nagios --with-openssl
	make
	sudo make install
# Install NRPE and build from source
	cd ~
	curl -L -O http://downloads.sourceforge.net/project/nagios/nrpe-2.x/nrpe-2.15/nrpe-2.15.tar.gz
	tar xvf nrpe-*.tar.gz
	cd nrpe-*
	./configure --enable-command-args --with-nagios-user=nagios --with-nagios-group=nagios --with-ssl=/usr/bin/openssl --with-ssl-lib=/usr/lib/x86_64-linux-gnu
	make all
	sudo make install
	sudo make install-xinetd
	sudo make install-daemon-config
	sudo vi /etc/xinetd.d/nrpe
	# in /etc/xinetd.d/nrpe, edit the only_from line to list the hosts allowed to connect via NRPE:
	#   only_from = 127.0.0.1 192.168.33.0/24
	sudo service xinetd restart
	sudo vi /usr/local/nagios/etc/nagios.cfg
	# uncomment the line: cfg_dir=/usr/local/nagios/etc/servers
	sudo mkdir -p /usr/local/nagios/etc/servers
	sudo vi /usr/local/nagios/etc/objects/contacts.cfg   # change the admin email used for notifications
	sudo vi /usr/local/nagios/etc/objects/commands.cfg
	# add the following command definition (without the leading #):
	#   define command{
	#       command_name check_nrpe
	#       command_line $USER1$/check_nrpe -H $HOSTADDRESS$ -c $ARG1$
	#   }

sudo htpasswd -c /usr/local/nagios/etc/htpasswd.users nagiosadmin
sudo systemctl start nagios.service
sudo systemctl restart httpd.service
# Enable nagios to run in multi user mode and on bootup 
sudo chkconfig nagios on
# Grant permissions to run the web app for Nagios
sudo vi /etc/httpd/conf.d/nagios.conf
Order allow,deny
Allow from all
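Before restarting Nagios, it is worth validating the configuration; the standard check is:

	/usr/local/nagios/bin/nagios -v /usr/local/nagios/etc/nagios.cfg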

EC2 CLI - 1

#!/bin/bash

# EC2

# List your running EC2 instances
aws ec2 describe-instances

# Stops an instance
aws ec2 stop-instances --instance-ids i-004f15f18e76bb7eb

# Starts a stopped instance
aws ec2 start-instances --instance-ids i-004f15f18e76bb7eb

# Reboots an instance
aws ec2 reboot-instances --instance-ids i-004f15f18e76bb7eb 

# List image information
aws ec2 describe-images --image-ids ami-340aae4e

#Creates an image from an instance
aws ec2 create-image --instance-id i-004f15f18e76bb7eb --name "WebServer AMI" --description "WebServer for dev team"



aws --version

# Best practice, verify user configuration
aws configure

# Lists all buckets
aws s3 ls

# Lists contents of named bucket
aws s3 ls s3://demo-simple-lynn-data

# Create a new bucket in a particular region
aws s3 mb s3://demo-simple-lynn-new-today --region us-west-1

# Lists all buckets
aws s3 ls



How to launch instances


#!/bin/bash

IMAGENAME=BASTION
INSTANCEID=i-076d3582c7db06190
IMAGEDESCRIP="An AMI for $IMAGENAME"


function createImage {
	aws ec2 create-image --instance-id $INSTANCEID --name $IMAGENAME --description "$IMAGEDESCRIP"

}

# aws configure
createImage
# aws ec2 create-image --instance-id i-034a9a2d28543a0de --name "BASTION" --description "An AMI for BASTION"
# apt install ec2-api-tools
#launchImage
# aws ec2 run-instances --image-id ami-b911ded4 --count 1 --instance-type t2.micro --key-name BASTIONKEYPAIR --security-group-ids sg-a3eaadd8 --subnet-id subnet-013c3c77 --tags Key=Name,Value=BASTION




#!/bin/bash
aws ec2 create-image --region=us-west-2 --instance-id=i-acc8cea1 --name=smtp`date +%m%d%y` --no-reboot
aws ec2 create-image --region=us-west-2 --instance-id=i-549d8b5c --name=vms`date +%m%d%y` --no-reboot
#!/bin/bash
ec2-describe-images | grep `date --date="4 days ago" +%Y-%m-%d` | awk '{print "Deregistering-> " $2; system("ec2-deregister " $2)}'
ec2-describe-instances | grep instance | awk '{print "Creating -> " $3; system("ec2-create-image --name " $5 "-$(date +%F) --no-reboot " $3)}'
ec2-describe-snapshots | sort -k 5 | awk '{print "Deleting-> " $2; system("ec2-delete-snapshot " $2)}'




#!/bin/bash


_instanceIDS="instanceIDS" # put instance IDs separated by spaces

_date=`date +"%m-%B-%Y"`


for instanceID in ${_instanceIDS[@]}; do

        # Get the tag value associated with the EC2 instance
        _tag=$(aws ec2 describe-instances --filters "Name=instance-id,Values=$instanceID" --query Reservations[*].Instances[*].Tags[*].Value --output text)

        echo "Creating AMI for $_tag instance having instance ID: $instanceID"

        # create-image requires a --name; reuse the tag and date
        aws ec2 create-image --instance-id "$instanceID" --name "$_tag-AMI-$_date" --description "$_tag-AMI-$_date" --no-reboot
done



#!/bin/bash
 
instances=$(aws ec2 describe-instances --filters Name=tag-key,Values=backup --query 'Reservations[*].Instances[*].[InstanceId,to_string(Tags[?Key==`backup`].Value),to_string(Tags[?Key==`Name`].Value)]' --output text | tr -d "[" | tr -d "]" | tr -d "\"" | awk '{print $1","$2","$3}')
 
for instance in $instances
do
  parts=$(echo $instance | sed -e "s/,/ /g")
  columns=($parts)
  instance_id=${columns[0]}
  name=${columns[2]}
  aws ec2 create-image --instance-id $instance_id --no-reboot --name ${name}_`date +"%Y%m%d%H%M%S"`
done

Reverse Proxy for Node.js application using Apache

Node.js

Node.js is widely used to build web applications with real-time, two-way connections, where both the client and the server can initiate communication, allowing them to exchange data freely. The best part is that we don't need a host container: building a web app just means including a library that listens for HTTP requests on a port and responds. It is as simple as coding your app and letting it listen on any non-privileged port, e.g. 3000.

Need for configuring Apache with Node.js

With time, things started getting complicated, and the requirement of running node applications on privileged ports like 80 and 443 emerged.

Advantages of running Node.js with Apache

  1. It is the cherry on top of the cake: you can combine functionality from other languages like PHP with your node app.
  2. It enhances security: Apache mod_proxy, nginx, or haproxy route requests to your apps and sit as a layer between them and the wider web.
  3. It fulfills the need to cache your static content and to handle/scale to more users/connections per second than your node app probably can on its own.
  4. It can also take care of HTTPS termination, subdomain routing, redirects, and things like that.
  5. It adds the option to scale your application via load balancing, which helps avoid downtime in production environments.
  6. It allows the user to run multiple Node.js apps on the HTTP port.

This tutorial will guide you through achieving these requirements.

Prerequisites:

LAMP Server with Node.js and npm installed over it. I am using Ubuntu machine for this demo

Using Apache’s Mod_Proxy Module (Non – SSL)

Enable the mod_proxy and mod_proxy_http Apache modules. They should be available by default, so just enable them with the a2enmod command:

sudo a2enmod proxy
sudo a2enmod proxy_http

For more information on mod_proxy, see the official Apache documentation.
Condition: you must have started your node application, and it must be serving successfully over http://mydomain.com:3000/

<VirtualHost *:80>
  ServerName mydomain.com
  ServerAlias www.mydomain.com
  DocumentRoot /var/www/nodeapp/
  Options -Indexes
  ErrorDocument 503 /check.html

  ProxyRequests on
  ProxyPass /check.html !
  ProxyPass / http://mydomain.com:3000/
  ProxyPassReverse / http://mydomain.com:3000/
</VirtualHost>

We have created the virtual host for the domain mydomain.com on which we want to run a node application. We have enabled the proxy module in Apache; check.html will not be proxied but will instead be served by Apache as a 'normal' page (the ! means it won't be sent through). For more info on the ProxyPass directive, see the official Apache documentation.

Using the above proxy configuration in a vhost file, you will magically get the responses of your node server (running on http://localhost:3000/) by simply browsing to the domain name mydomain.com.

Using Apache’s Mod_Proxy Module (SSL)

Condition: you must have started your node application, and it must be serving successfully over https://mydomain.com:3000/

<VirtualHost *:443>
  ServerName mydomain.com
  ServerAlias www.mydomain.com
  DocumentRoot /var/www/nodeapp/
  Options -Indexes
  ErrorDocument 503 /check.html

  SSLProxyEngine On
  ProxyPass /check.html !
  ProxyPass / https://mydomain.com:3000
  ProxyPassReverse / https://mydomain.com:3000
  ProxyPreserveHost On

  SSLEngine on
  SSLCertificateFile /etc/apache2/ssl/mydomain.com.crt
  SSLCertificateKeyFile /etc/apache2/ssl/mydomain.com.key
  SSLCertificateChainFile /etc/apache2/ssl/mydomain.com.ca-bundle
</VirtualHost>

Secondary Option:

Routing Application Using IPTABLES

One way is to route traffic from port 80 to the desired port internally using iptables. In the example below, all incoming traffic on port 80 is redirected to port 3000.

sudo iptables -t nat -A PREROUTING -i eth0 -p tcp --dport 80 -j REDIRECT --to-port 3000
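To confirm the redirect is in place, or to remove it later, something like the following works (rule numbers will vary):

sudo iptables -t nat -L PREROUTING -n --line-numbers
# delete the redirect again by its rule number, e.g. rule 1:
sudo iptables -t nat -D PREROUTING 1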

Backup Amazon EBS Volume Snapshots from Amazon Ec2

Data is the most important asset in any business, and backing it up at regular intervals is good practice. We have come up with a technique to automatically back up Amazon EBS volume snapshots from a running Amazon EC2 instance using the AWS CLI.

Prerequisite

  • Amazon AWS account for creating Access key ID and Secret access key.
  • Running Amazon Ec2 Instance.
  • AWS Command Line Interface.

Create Access key ID and Secret access key

  • Log in to your Amazon Dashboard Console Account.
  • Click on IAM management console.
  • Click on Add Users to create a new user for backing up EBS volumes.

Installing AWS Command Line Interface

We are using a CentOS 7 machine for the demonstration.

python --version
curl "https://bootstrap.pypa.io/get-pip.py" -o "get-pip.py"
python get-pip.py
pip -V
pip install awscli
pip install --upgrade awscli

Configure AWS CLI on Linux

After installing awscli, you need to configure it on the Amazon EC2 instance. Run aws configure and enter the AWS Access Key ID, AWS Secret Access Key, and default region name when prompted. The default output format can be left blank.

aws configure
AWS Access Key ID [None]: AKIAIUARU2RSR74GQRXA
AWS Secret Access Key [None]: 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
Default region name [None]: us-east-1
Default output format [None]:

We are done with all the prerequisites; now we will go ahead and schedule automatic backups for Amazon EBS volumes from Amazon EC2 instances using awscli.


Auto Backup Amazon EBS Volumes From Amazon Ec2

Use the command below to create a snapshot of an Amazon EBS volume from an EC2 instance. Enter the ID of the EBS volume you want to back up and provide a meaningful description.

Command

aws ec2 create-snapshot --volume-id vol-xxxxxxxx --description "Relevant description Message"


How to obtain the EBS volume ID

Log in to the AWS console >> click on the EC2 service >> select the EC2 instance >> choose the block device.
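The volume ID can also be fetched with the AWS CLI; a sketch, assuming you know the instance ID:

aws ec2 describe-volumes --filters Name=attachment.instance-id,Values=i-xxxxxxxx --query "Volumes[*].{ID:VolumeId,Device:Attachments[0].Device}" --output table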

Apply Cronjob to Auto Backup Snapshots of Amazon EBS.

crontab -e
######## Automating EBS Volume Snapshots ########

00 09 * * * aws ec2 create-snapshot --volume-id vol-0234f278482d2e197 --description "Disk Backup for Linux Together Demo Server"

This brings an end to the tutorial. In the above tutorial, we have elaborated the technique to back up Amazon EBS volumes using awscli. Write down your questions and queries in the comment section. Do like and share the blog; we will be back with more interesting tutorials soon. Thanks!

vsftpd

This howto describes how to configure vsftpd to enable SSL using so-called intermediate (chaining) certificates.

Edit vsftpd.conf so that SSL is enabled:

ssl_enable=YES

rsa_cert_file=/usr/share/ssl/certs/vsftpd.pem

force_local_data_ssl=No

force_local_logins_ssl=NO

It is very important to construct the certificate file /usr/share/ssl/certs/vsftpd.pem in the correct certificate order. Your certificate file has to be a .pem file. If you also received an intermediate certificate, then you have to concatenate it, the domain certificate, and your private key file into one single .pem file. Make sure all the information is included, without any extra spaces or blank lines, as below.

-----BEGIN CERTIFICATE-----
(your_domain_name.crt)
-----END CERTIFICATE-----

-----BEGIN CERTIFICATE-----
(chaining certificate 3)
-----END CERTIFICATE-----

-----BEGIN CERTIFICATE-----
(chaining certificate 2)
-----END CERTIFICATE-----

-----BEGIN CERTIFICATE-----
(chaining certificate 1)
-----END CERTIFICATE-----

-----BEGIN RSA PRIVATE KEY-----
(your_domain_name.key)
-----END RSA PRIVATE KEY-----

This is how to check an SSL-enabled FTP service (FTP Secure). See the result below:

$ lftp -u username localhost -e "debug;set ftp:ssl-protect-data true;ls;exit"

Password:

$
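The TLS handshake itself can also be inspected with openssl (a sketch):

openssl s_client -connect localhost:21 -starttls ftp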

xferlog_file=/var/log/vsftpd.log
xferlog_enable=YES
dirmessage_enable=YES
data_connection_timeout=600
dual_log_enable=YES
pam_service_name=vsftpd
userlist_enable=YES
tcp_wrappers=YES
ssl_enable=YES
allow_anon_ssl=NO
force_local_data_ssl=YES
force_local_logins_ssl=YES
ssl_tlsv1=NO
ssl_sslv2=NO
ssl_sslv3=NO
ssl_tlsv1_2=YES
ssl_ciphers=HIGH:-3DES:-aNULL
rsa_cert_file=/etc/vsftpd/rmohan.pem
pasv_min_port=28000
pasv_max_port=30000
require_ssl_reuse=NO

EC2 instance

volume == hard disk
security groups == virtual firewalls
--------------------------
EC2 instance types:
'Dr mc gift pix'
d - density
r - RAM
m - main choice (general purpose)
c - compute
g - graphics
i - IOPS
f - FPGA (field-programmable gate array)
t - cheap (t2 micro)
p - graphics (pics)
x - extreme
--------------------------

termination protection - off by default.
EBS volumes are deleted when the EC2 instance is terminated, by default.
the EBS root volume of a default AMI can't be encrypted, but third-party tools can be used to encrypt it, or you can make an image copy of the instance.
additional volumes can be encrypted
-------------------

EBS: the block storage associated with an EC2 instance

pricing models:
-on demand
-spot
-reserved
-dedicated host

instances are charged by the hour, rounded up; unless AWS terminates the instance, then it is rounded down.

ebs consists of:
 ssd, general purpose - GP2 - (up to 10,000 iops)
 ssd, provisioned iops - IO1 - more than 10,000 iops
 hdd, throughput optimized - ST1 - frequently accessed workloads
 hdd, cold - SC1 - less frequently accessed data
 hdd, magnetic - standard - cheap, less frequently accessed data

*can't connect more than 1 EC2 instance to 1 EBS volume. use EFS for that

----------------
instance termination protection is turned off by default.
the root volume (where the OS is installed) is deleted when the EC2 instance is terminated, by default.
root volumes can't be encrypted unless using third-party tools, but other EBS volumes can be.
----------------
volumes exist on EBS - they are virtual hard disks.
snapshots (point-in-time copies of volumes) exist on S3.
taking a snapshot of a volume will store that volume on S3.
snapshots are incremental - only data that changed since the last snapshot is moved to S3
snapshots of encrypted volumes are encrypted automatically
volumes restored from encrypted snapshots are encrypted automatically
you can share snapshots, but only if they are un-encrypted.
you should stop the instance before taking a snapshot of a root volume
------------------
security rules:
all inbound traffic is blocked by default
all outbound traffic is allowed by default
can have any number of instances within a security group
can have multiple security groups attached to EC2 instances
there are no 'deny' rules, only allow
security groups are stateful - if you allow a rule for traffic in, that traffic is also allowed out. (access lists are not)
you can't block specific ip addresses with security groups.

---------------------
roles: are more secure than storing the access and secret keys on EC2, and easier to manage.
can be assigned to instances after they are provisioned
roles can be updated during usage.
----------------------
raid = redundant array of independent disks, acting as one disk to the OS.
raid 0 - good performance, no data redundancy
raid 1 - mirrored, data redundancy
raid 5 - aws doesn't recommend this for EBS. good for reads, bad for writes
raid 10 - good redundancy and performance.

to increase IO - increase disk volumes as a raid
-----------------
taking a snapshot of a Raid array: (application consistent snapshot)
stop the application from writing to disk by freezing the file system / unmounting the raid array / shutting down the EC2 instance
flush all caches to the disk
--------------------
when taking a snapshot of an encrypted volume, the snapshot is encrypted by default
volumes restored from encrypted snapshots are also encrypted
can't share encrypted snapshots.
------------------------
AMI - amazon machine image.
AMI's are available on the store.
AMis are regional - can only be launched from the region it is stored in. but you can copy AMIs to other region. using the cli api or console.
-------------------
AMI types: EBS backed and Instance Store backed (also called ephemeral).
Instance Store instances can't attach additional Instance Store volumes after launching.
EBS-backed instances can be stopped and re-run on a different hypervisor in case of a problem; Instance Store instances can't.
Instance Stores are less durable: if their host fails, the instance is lost. (ephemeral)
EBS volumes are created from a snapshot; Instance Store volumes are created from a template stored on S3
both can be rebooted without losing data
-------------
elastic load balancer
classic / application
*a subnet == availability zone
has healthchecks
no ip address, only dns names.
----------------------
cloudwatch
standard monitoring - 5 mins; detailed monitoring - 1 min
create dashboards, alarms, events, logs
CloudWatch is for logging and monitoring; CloudTrail is for auditing an entire AWS environment / accounts.
-----------------------
credentials are normally stored on an instance under the .aws folder. this is a security risk.
roles allow instances not to have the credentials written to a file on the instance, and are therefore safer
roles are global, not per zone

---------------------------------
bash script example: 
#!/bin/bash
yum update -y
yum install httpd -y
service httpd start
chkconfig httpd on
aws s3 cp s3://mywebsitebucket-acg2/index.html /var/www/html

----------------
metadata:
curl http://169.254.169.254/latest/meta-data/
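appending a field name returns just that field, e.g.:

curl http://169.254.169.254/latest/meta-data/instance-id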

---------------------------
Autoscaling groups require a launch configuration before launching.
--------------
placement group - a grouping of instances within a single availability zone. used for low network latency and high performance.
only certain types of instances can be part of a placement group.
recommended to use homogeneous instance types (same size and family)
can't merge groups.
can't move existing instances into an existing group.
------------------------
efs - elastic file system.
pay for the storage you use.
allows a single storage volume to be used across multiple ec2 instances; can serve as central file storage.
data is stored across multiple AZs
read after write consistency
*not available in all zones yet
----------------------
lambda - no servers, auto scaling, very cheap
-------------
summary

VPC - AWS

** 1 subnet == 1 AZ.
ACL = access control list
SN = subnet
IGW = internet gateway
CIDR - classless inter-domain routing - where we assign ip ranges
NAT - network address translation
------------------
internal ip address ranges
(rfc 1918)
10.0.0.0 -10.255.255.255 (10/8 prefix)
172.16.0.0 - 172.31.255.255 (172.16/12 prefix)
192.168.0.0 - 192.168.255.255 (192.168 / 16 prefix)
we will always use a /16 network address
--------------------
vpc - virtual private cloud
think of a vpc as a logical data center.
you provision a section of the aws cloud in a virtual network. you can easily customize your network.

example - a public-facing subnet for webservers, and private-facing backend db servers with no internet connection.

you can create a hardware virtual private network (VPN) between your corporate datacenter and your VPC, to leverage aws as an extension of your data center (hybrid cloud)
--------------
what can you do with a VPC?

launch instances into a subnet
assign custom IP ranges in each subnet
configure route tables between subnets
create an internet gateway. only 1 per VPC
better security over aws resources
security groups (are stateful - incoming rules are automatically allowed as outgoing)
subnet ACL (not stateful. everything needs to be configured)
------------------
default VPC 
default VPCs have a route out to the internet
each EC2 instance has a public and a private ip
the only way to restore a default VPC is to contact aws.

security groups , ACL , default Route table are created by default.
subnets and IGW are not created by default.
---------
Peering
connect one VPC to another using private ip addresses. (not over the internet)
instances behave as if they are on same network.
can also peer a VPC with another AWS account's VPC within a SINGLE REGION
star configuration - 1 central VPC peers with 4 others. NO TRANSITIVE PEERING == the networks must be directly connected.
example: VPC-a is connected to VPC-b and VPC-c. VPC-b and VPC-c can't talk to each other through VPC-a (transitive); they must be directly peered.
CIDR blocks for the private IPs must be different between peering VPCs - VPC A 10.0.0.0/16 can't peer with VPC B if it has 10.0.0.0/24
----------------
NAT 
NAT instances - the traditional way of allowing an EC2 instance with no internet connection to have access to the internet for updates, installing dbs... we use an EC2 instance from the community AMIs (search for NAT).
remember to disable the source/destination check on the instance.
must be in a public subnet
in the route table, ensure there's a route out to the NAT instance. it's found in the default route table.
the bandwidth the NAT instance supports depends on the instance type.
to create high availability you need to use autoscaling groups, multiple subnets in different AZs, and scripts to automate failover
(lots of work..)
need to set a security group

NAT gateways - easier, preferred. scales automatically and no need to set security groups.
if a NAT instance goes down, so does our internet connection. but with NAT gateways aws takes care of that automatically. supports bandwidth up to 10 Gbps

---------------
building a VPC process (not using the wizard); a CLI sketch follows the list:

1. start vpc , your VPC, create VPC
2. name, CIDR block (our ip ranges. we used 10.0.0.0/16), tenancy (shared or dedicated hardware).
3. default route table, ACL, security groups are created
4. subnets-> create -> name, vpc (select the newly created one), AZ , CIDR (we used 10.0.1.0/24 which will give us 10.0.1.xxx)
5. create another subnet -> name, vpc (same as above), AZ (different than above), CIDR (10.0.2.0/24)
6. internet gateways -> create -> name
7. attach to vpc (select newly created vpc)
8. route tables -> (main route table is private by default)
9. create new table -> name, vpc
10. edit -> add route that is open to the internet
11. subnet associations -> edit -> select a subnet that will be the public one
12. subnets -> select the public one -> actions -> modify auto-assign public ip.
13. deploy 2 EC2 instances. one is a public web server (can use a script). one is a private sql server. (notice the auto-assign public ip..). for the private instance, set a new security group with ssh - 10.0.1.0/24, mysql/aurora - 10.0.1.0/24
14. for the mysql server add another rule for all ICMP with the same ip range - this allows ping.
15. copy the content of the privateKey.pem file.
16. ssh into the web server -> create (echo or nano) a new privateKey.pem file and paste the content.
17. chmod 0600 the privateKey.pem file (gives read and write privileges to that file).
18. ssh into the sql server using the newly created file.
19. (NAT instance) launch an EC2 instance -> community -> search for nat
20. deploy into our VPC, and put into the public subnet.
21. use a public-facing security group
22. actions -> networking -> change source/destination check -> disable
23. VPC -> route tables -> select the main route (nameless) -> add 0.0.0.0/0, target - our newly created EC2
 (?? associate public subnet )
24. (NAT gateway - replaces steps 19-23) VPC -> NAT gateways -> create -> subnet (public facing), elastic ip (create new EIP)
25. route tables -> main route table -> add 0.0.0.0/0, target - the newly created gateway.
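the same build can be scripted with the AWS CLI; a minimal sketch assuming the CLI is configured (the xxxx IDs are placeholders for the IDs each call returns):

aws ec2 create-vpc --cidr-block 10.0.0.0/16
aws ec2 create-subnet --vpc-id vpc-xxxx --cidr-block 10.0.1.0/24 --availability-zone us-east-1a
aws ec2 create-internet-gateway
aws ec2 attach-internet-gateway --internet-gateway-id igw-xxxx --vpc-id vpc-xxxx
aws ec2 create-route-table --vpc-id vpc-xxxx
aws ec2 create-route --route-table-id rtb-xxxx --destination-cidr-block 0.0.0.0/0 --gateway-id igw-xxxx
aws ec2 associate-route-table --route-table-id rtb-xxxx --subnet-id subnet-xxxx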

--------------
security groups vs NACL:

security groups act as the first layer of defence. they operate at the instance level. stateful
a N(network)ACL operates at the subnet level. stateless. denies all traffic by default

a subnet can only be associated with one NACL, but an NACL can be associated with many subnets.
if you try to add an ACL to a subnet that is already associated with an ACL, the new ACL will just replace the old one.

rules are evaluated in numerical order.
the lowest-numbered rules take precedence over later rules.
example:
rule 99 blocks my ip
rule 100 allows all ips
== my ip is still blocked.

you can't block using a security group
---------------------------------------------
notes:
when setting up an ELB, to get good availability you need at least two AZs or subnets. So check whether your VPC actually has more than 1 public subnet

Bastion - used to securely administer EC2 instances in private subnets (using ssh or RDP - remote desktop protocol). used instead of a NAT.
for our purposes, we used the nat-EC2 as a Bastion

Flow logs - enable you to capture IP traffic flow information for the network interfaces of your resources and log it in CloudWatch.