# https://www.terraform.io/docs/commands/output.html
# https://learn.hashicorp.com/terraform/getting-started/outputs.html
# Output the result
output "show-ads" {
value = "${data.oci_identity_availability_domains.ADs.availability_domains}"
}
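Once terraform apply has run, the resolved list can be printed on demand with the output command:
terraform output show-ads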
datasources.tf
# https://www.terraform.io/docs/configuration/data-sources.html
# https://www.terraform.io/docs/providers/oci/d/identity_availability_domains.html
# Gets a list of Availability Domains
data "oci_identity_availability_domains" "ADs" {
compartment_id = "${var.compartment_ocid}"
}
# https://www.terraform.io/docs/providers/oci/d/core_images.html
data "oci_core_images" "oracle_linux_image" {
compartment_id = "${var.compartment_ocid}"
operating_system = "Oracle Linux"
operating_system_version = "7.6"
}
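If you would rather pick the image from this data source instead of a variable map, the first result can be referenced roughly as below. This is only a sketch: whether the newest image comes first depends on the provider's default sort order.
output "oracle_linux_image_id" {
  value = "${lookup(data.oci_core_images.oracle_linux_image.images[0], "id")}"
}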
PS D:\practices\terraform\oke> terraform init
Initializing provider plugins...
- Checking for available provider plugins on https://releases.hashicorp.com...
- Downloading plugin for provider "oci" (3.20.0)...
- Downloading plugin for provider "local" (1.2.0)...
The following providers do not have any version constraints in configuration,
so the latest version was installed.
To prevent automatic upgrades to new major versions that may contain breaking
changes, it is recommended to add version = "..." constraints to the
corresponding provider blocks in configuration, with the constraint strings
suggested below.
* provider.local: version = "~> 1.2"
* provider.oci: version = "~> 3.20"
Terraform has been successfully initialized!
You may now begin working with Terraform. Try running "terraform plan" to see
any changes that are required for your infrastructure. All Terraform commands
should now work.
If you ever set or change modules or backend configuration for Terraform,
rerun this command to reinitialize your working directory. If you forget, other
commands will detect it and remind you to do so if necessary.
PS D:\practices\terraform\oke>
resource "oci_core_instance" "Bastion" {
#Required
availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[var.availability_domain - 1],"name")}"
compartment_id = "${var.compartment_ocid}"
shape = "${var.instance_shape}"
#Optional
count = "${var.NumInstances}"
display_name = "Bastion${count.index}"
create_vnic_details {
#Required
subnet_id = "${oci_core_subnet.terraform_subnet.id}"
#Optional
display_name = "primaryvnic"
assign_public_ip = true
hostname_label = "Bastion${count.index}"
private_ip = "10.0.0.2"
}
source_details {
source_type = "image"
source_id = "${var.instance_image_ocid[var.region]}"
# Apply this to set the size of the boot volume that's created for this instance.
# Otherwise, the default boot volume size of the image is used.
# This should only be specified when source_type is set to "image".
#boot_volume_size_in_gbs = "60"
}
# Apply the following flag only if you wish to preserve the attached boot volume upon destroying this instance
# Setting this and destroying the instance will result in a boot volume that should be managed outside of this config.
# When changing this value, make sure to run 'terraform apply' so that it takes effect before the resource is destroyed.
#preserve_boot_volume = true
metadata {
ssh_authorized_keys = "${file(var.ssh_public_key_file)}"
user_data = "${base64encode(file(var.BootStrapFile))}"
}
timeouts {
create = "60m"
}
}
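As a convenience (my own addition for illustration, not part of the original configuration), the bastion public IPs can be exported so they are printed after apply:
output "bastion_public_ips" {
  value = "${oci_core_instance.Bastion.*.public_ip}"
}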
PS D:\practices\terraform\bastion> terraform init
Initializing provider plugins...
The following providers do not have any version constraints in configuration,
so the latest version was installed.
To prevent automatic upgrades to new major versions that may contain breaking
changes, it is recommended to add version = "..." constraints to the
corresponding provider blocks in configuration, with the constraint strings
suggested below.
* provider.oci: version = "~> 3.16"
Terraform has been successfully initialized!
You may now begin working with Terraform. Try running "terraform plan" to see
any changes that are required for your infrastructure. All Terraform commands
should now work.
If you ever set or change modules or backend configuration for Terraform,
rerun this command to reinitialize your working directory. If you forget, other
commands will detect it and remind you to do so if necessary.
PS D:\practices\terraform\bastion>
PS D:\practices\terraform\vcn> .\env-vars.ps1
SUCCESS: Specified value was saved.
SUCCESS: Specified value was saved.
SUCCESS: Specified value was saved.
SUCCESS: Specified value was saved.
SUCCESS: Specified value was saved.
SUCCESS: Specified value was saved.
Execute terraform init
PS D:\practices\terraform\vcn> terraform init
Terraform initialized in an empty directory!
The directory has no Terraform configuration files. You may begin working
with Terraform immediately by creating Terraform configuration files.
PS D:\practices\terraform\vcn> terraform init
Initializing provider plugins...
- Checking for available provider plugins on https://releases.hashicorp.com...
- Downloading plugin for provider "oci" (3.16.0)...
The following providers do not have any version constraints in configuration,
so the latest version was installed.
To prevent automatic upgrades to new major versions that may contain breaking
changes, it is recommended to add version = "..." constraints to the
corresponding provider blocks in configuration, with the constraint strings
suggested below.
* provider.oci: version = "~> 3.16"
Terraform has been successfully initialized!
You may now begin working with Terraform. Try running "terraform plan" to see
any changes that are required for your infrastructure. All Terraform commands
should now work.
If you ever set or change modules or backend configuration for Terraform,
rerun this command to reinitialize your working directory. If you forget, other
commands will detect it and remind you to do so if necessary.
Execute terraform plan
PS D:\practices\terraform\vcn> terraform plan
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ oci_core_virtual_network.terraform-vcn
id: <computed>
cidr_block: "10.0.0.0/16"
compartment_id: "ocid1.compartment.oc1..sfadsfsaaaafasdfdsafsadfdsfdsafdsafsadfs"
default_dhcp_options_id: <computed>
default_route_table_id: <computed>
default_security_list_id: <computed>
display_name: "terraform-vcn"
dns_label: "vcn1"
freeform_tags.%: <computed>
state: <computed>
time_created: <computed>
vcn_domain_name: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.
------------------------------------------------------------------------
Note: You didn't specify an "-out" parameter to save this plan, so Terraform
can't guarantee that exactly these actions will be performed if
"terraform apply" is subsequently run.
Execute terraform apply
PS D:\practices\terraform\vcn> terraform apply
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
+ create
Terraform will perform the following actions:
+ oci_core_virtual_network.terraform-vcn
id: <computed>
cidr_block: "10.0.0.0/16"
compartment_id: "ocid1.compartment.oc1..afdsafdsafdsafdsafsadfdsafdsaf"
default_dhcp_options_id: <computed>
default_route_table_id: <computed>
default_security_list_id: <computed>
display_name: "terraform-vcn"
dns_label: "vcn1"
freeform_tags.%: <computed>
state: <computed>
time_created: <computed>
vcn_domain_name: <computed>
Plan: 1 to add, 0 to change, 0 to destroy.
Do you want to perform these actions?
Terraform will perform the actions described above.
Only 'yes' will be accepted to approve.
Enter a value: yes
oci_core_virtual_network.terraform-vcn: Creating...
cidr_block: "" => "10.0.0.0/16"
compartment_id: "" => "ocid1.compartment.oc1..afdsafdsafdsafsadfsd"
default_dhcp_options_id: "" => "<computed>"
default_route_table_id: "" => "<computed>"
default_security_list_id: "" => "<computed>"
display_name: "" => "terraform-vcn"
dns_label: "" => "vcn1"
freeform_tags.%: "" => "<computed>"
state: "" => "<computed>"
time_created: "" => "<computed>"
vcn_domain_name: "" => "<computed>"
oci_core_virtual_network.terraform-vcn: Creation complete after 8s (ID: ocid1.vcn.oc1.eu-frankfurt-1.aaaaaaaayl...afsafdsfdsfsdafdsa)
Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
PS D:\practices\terraform\vcn>
D:\practices\terraform\vcn>terraform destroy
oci_core_virtual_network.terraform-vcn: Refreshing state... (ID: ocid1.vcn.oc1.eu-frankfurt-1.aaaaaaaayl...asfdsafsdafsdfsda)
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
- destroy
Terraform will perform the following actions:
- oci_core_virtual_network.terraform-vcn
Plan: 0 to add, 0 to change, 1 to destroy.
Do you really want to destroy all resources?
Terraform will destroy all your managed infrastructure, as shown above.
There is no undo. Only 'yes' will be accepted to confirm.
Enter a value: yes
oci_core_virtual_network.terraform-vcn: Destroying... (ID: ocid1.vcn.oc1.eu-frankfurt-1.aaaaaaaayl...asfdsafsadfdsafsa)
oci_core_virtual_network.terraform-vcn: Destruction complete after 1s
Destroy complete! Resources: 1 destroyed.
D:\practices\terraform\vcn>
Edit the /etc/selinux/config file and change the SELINUX=enforcing line to SELINUX=permissive
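One way to make that change without opening an editor (run as root; either reboot or run setenforce 0 so it takes effect immediately):
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
setenforce 0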
[opc@k8s-master ~]$ cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=permissive
# SELINUXTYPE= can take one of three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
[root@k8s-master ~]# systemctl enable docker && systemctl start docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@k8s-master ~]# systemctl enable kubelet && systemctl start kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
Disable Swap
[root@k8s-worker ~]# swapoff -a && sed -i '/ swap / s/^/#/' /etc/fstab
[root@k8s-master ~]# kubeadm reset -f && rm -rf /etc/kubernetes/
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks
[reset] Removing info for node "k8s-master" from the ConfigMap "kubeadm-config" in the "kube-system" Namespace
W0807 17:52:28.097752 17828 removeetcdmember.go:61] [reset] failed to remove etcd member: error syncing endpoints with etc: etcdclient: no available endpoints.
Please manually remove this etcd member using etcdctl
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[reset] Deleting contents of stateful directories: [/var/lib/etcd /var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes]
The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually.
For example:
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.
The reset process does not clean your kubeconfig files and you must remove them manually.
Please, check the contents of the $HOME/.kube/config file.
Initialize cluster
[root@k8s-master ~]# kubeadm init
kubeadm init output
[init] Using Kubernetes version: v1.15.2
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.6]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [10.0.0.6 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [10.0.0.6 127.0.0.1 ::1]
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.503514 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 9f9xuf.su3t8exlnqejevtd
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.0.0.6:6443 --token 9f9xuf.su3t8exlnqejevtd \
--discovery-token-ca-cert-hash sha256:6c61d0f6239d61af6de250abbce3f37122298be6a6cc27d05766128a0b844181
Copy the last two lines; we will need them later.
Kubeadm has deployed all the necessary control plane components, including etcd, the API server, kube-proxy, the scheduler, and the controller manager.
[root@k8s-master ~]# kubectl get po -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-n2htv 0/1 Pending 0 64s
coredns-5c98db65d4-qnvxr 0/1 Pending 0 64s
etcd-k8s-master 1/1 Running 0 15s
kube-apiserver-k8s-master 0/1 Pending 0 2s
kube-controller-manager-k8s-master 0/1 Pending 0 9s
kube-proxy-hllq2 1/1 Running 0 64s
kube-scheduler-k8s-master 0/1 Pending 0 9s
[root@k8s-master ~]# kubectl get po -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-n2htv 0/1 Pending 0 74s
coredns-5c98db65d4-qnvxr 0/1 Pending 0 74s
etcd-k8s-master 1/1 Running 0 25s
kube-apiserver-k8s-master 1/1 Running 0 12s
kube-controller-manager-k8s-master 1/1 Running 0 19s
kube-proxy-hllq2 1/1 Running 0 74s
kube-scheduler-k8s-master
[root@k8s-master ~]# kubectl describe pod coredns-5c98db65d4-n2htv -n kube-system
Name: coredns-5c98db65d4-n2htv
Namespace: kube-system
Priority: 2000000000
Priority Class Name: system-cluster-critical
Node: <none>
Labels: k8s-app=kube-dns
pod-template-hash=5c98db65d4
Annotations: <none>
Status: Pending
IP:
Controlled By: ReplicaSet/coredns-5c98db65d4
Containers:
coredns:
Image: k8s.gcr.io/coredns:1.3.1
Ports: 53/UDP, 53/TCP, 9153/TCP
Host Ports: 0/UDP, 0/TCP, 0/TCP
Args:
-conf
/etc/coredns/Corefile
Limits:
memory: 170Mi
Requests:
cpu: 100m
memory: 70Mi
Liveness: http-get http://:8080/health delay=60s timeout=5s period=10s #success=1 #failure=5
Readiness: http-get http://:8080/health delay=0s timeout=1s period=10s #success=1 #failure=3
Environment: <none>
Mounts:
/etc/coredns from config-volume (ro)
/var/run/secrets/kubernetes.io/serviceaccount from coredns-token-4jpfz (ro)
Conditions:
Type Status
PodScheduled False
Volumes:
config-volume:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: coredns
Optional: false
coredns-token-4jpfz:
Type: Secret (a volume populated by a Secret)
SecretName: coredns-token-4jpfz
Optional: false
QoS Class: Burstable
Node-Selectors: beta.kubernetes.io/os=linux
Tolerations: CriticalAddonsOnly
node-role.kubernetes.io/master:NoSchedule
node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 6s (x19 over 106s) default-scheduler 0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master NotReady master 3m57s v1.15.2
[root@k8s-master ~]#
The kubelet isn't fully ready yet because no container network (CNI) plugin is installed. Let's deploy the Weave Net container networking plugin; several alternatives are also available.
[root@k8s-master ~]# kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
serviceaccount/weave-net created
clusterrole.rbac.authorization.k8s.io/weave-net created
clusterrolebinding.rbac.authorization.k8s.io/weave-net created
role.rbac.authorization.k8s.io/weave-net created
rolebinding.rbac.authorization.k8s.io/weave-net created
daemonset.extensions/weave-net created
Edit the /etc/selinux/config file and change the SELINUX=enforcing line to SELINUX=permissive
[opc@k8s-worker ~]$ cat /etc/selinux/config
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
SELINUX=permissive
# SELINUXTYPE= can take one of three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
[root@k8s-worker ~]# systemctl enable docker && systemctl start docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
[root@k8s-worker ~]# systemctl enable kubelet && systemctl start kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
Join this node
[root@k8s-worker ~]# kubeadm join 10.0.0.6:6443 --token 9f9xuf.su3t8exlnqejevtd \
> --discovery-token-ca-cert-hash sha256:6c61d0f6239d61af6de250abbce3f37122298be6a6cc27d05766128a0b844181
[preflight] Running pre-flight checks
[WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
[root@k8s-worker ~]#
Step 4: Testing
[opc@k8s-master ~]$ sudo su -
Last login: Wed Aug 7 16:50:02 GMT 2019 on pts/0
[root@k8s-master ~]# export KUBECONFIG=/etc/kubernetes/admin.conf
[root@k8s-master ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 8h v1.15.2
k8s-worker Ready <none> 96s v1.15.2
[root@k8s-master ~]# kubectl run nginx --image=nginx --port=80 --replicas=2
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@k8s-master ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-7c45b84548-9lxn7 1/1 Running 0 25s 10.44.0.1 k8s-worker <none> <none>
nginx-7c45b84548-z5gwx 1/1 Running 0 25s 10.44.0.2 k8s-worker <none> <none>
Create a service to connect to your nginx deployment.
Note that --type=LoadBalancer is not supported with this self-managed approach.
You can manually create an OCI load balancer that routes to the cluster, or expose the node port publicly via Oracle Cloud Infrastructure's security lists.
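The command that created the service was not captured here; it was most likely something along these lines, exposing the deployment through a NodePort service:
kubectl expose deployment nginx --type=NodePort --port=80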
[root@k8s-master ~]# kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 8h
nginx NodePort 10.102.90.50 <none> 80:30215/TCP 6s
[root@k8s-master ~]# curl 10.0.0.7:30215
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
<p><em>Thank you for using nginx.</em></p>
</body>
</html>
D:\practices\kubernetes\conf>kubectl get nodes
Unable to connect to the server: x509: certificate is valid for 10.96.0.1, 10.0.0.6, not 130.61.240.53
D:\practices\kubernetes\conf>kubectl --insecure-skip-tls-verify get nodes
NAME STATUS ROLES AGE VERSION
k8s-master Ready master 9h v1.15.2
k8s-worker Ready <none> 28m v1.15.2
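Skipping TLS verification works, but only as a workaround; an alternative (not what was done here) is to include the public IP in the API server certificate when initializing the cluster, for example:
kubeadm init --apiserver-cert-extra-sans=130.61.240.53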
PS C:\WINDOWS\system32> kubectl get nodes -o=wide
NAME STATUS ROLES AGE VERSION EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
10.0.10.11 Ready node 7h v1.12.7 <none> Oracle Linux Server 7.6 4.14.35-1902.2.0.el7uek.x86_64 docker://18.9.1
10.0.11.10 Ready node 7h v1.12.7 <none> Oracle Linux Server 7.6 4.14.35-1902.2.0.el7uek.x86_64 docker://18.9.1
10.0.12.13 Ready node 7h v1.12.7 <none> Oracle Linux Server 7.6 4.14.35-1902.2.0.el7uek.x86_64 docker://18.9.1
PS C:\WINDOWS\system32>
We are able to SSH into the worker nodes from the bastion host.
[opc@bastion01-4772 ~]$ ssh [email protected]
The authenticity of host '10.0.10.11 (10.0.10.11)' can't be established.
ECDSA key fingerprint is SHA256:AjoqfhDP/v1alWhfO6wkb4zNfSo6c6PI9hTmjy6n+cI.
ECDSA key fingerprint is MD5:65:56:a3:1a:c5:b8:e0:91:ea:59:60:df:e5:31:8d:87.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.0.10.11' (ECDSA) to the list of known hosts.
Oracle Linux Server 7.6
[opc@oke-c4dczdcmyzd-nydgnrvg44d-snzovaz7x6q-0 ~]$ exit
logout
Connection to 10.0.10.11 closed.
[opc@bastion01-4772 ~]$ ssh [email protected]
The authenticity of host '10.0.11.10 (10.0.11.10)' can't be established.
ECDSA key fingerprint is SHA256:DFgXRe/cUha3luqY8NJKiTIfnSzndjUCxTydi2kQLjw.
ECDSA key fingerprint is MD5:96:8b:6e:08:c9:56:c9:7b:52:85:4b:1e:bf:e9:e3:5c.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.0.11.10' (ECDSA) to the list of known hosts.
Oracle Linux Server 7.6
[opc@oke-c4dczdcmyzd-nydgnrvg44d-svwagmiyvwq-0 ~]$ exit
logout
Connection to 10.0.11.10 closed.
[opc@bastion01-4772 ~]$ ssh [email protected]
The authenticity of host '10.0.12.13 (10.0.12.13)' can't be established.
ECDSA key fingerprint is SHA256:1dMIxDQKxZ7mBCHKHTaCNuZ1bCIQmXikvLtFx6pEMFA.
ECDSA key fingerprint is MD5:27:b3:8d:b1:53:51:1e:25:11:be:22:83:a5:09:0f:61.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.0.12.13' (ECDSA) to the list of known hosts.
Oracle Linux Server 7.6
[opc@oke-c4dczdcmyzd-nydgnrvg44d-sa6zgwm6lrq-0 ~]$ exit
logout
Connection to 10.0.12.13 closed.
Let's create a secret to be used in the deployment descriptor.
D:\practices\kubernetes\demo>docker login fra.ocir.io
Authenticating with existing credentials...
Login Succeeded
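The registry secret itself (it appears later as ocirsecret) can be created with kubectl; a sketch with placeholder values for the OCIR tenancy namespace, user, and auth token:
kubectl create secret docker-registry ocirsecret --docker-server=fra.ocir.io --docker-username="<tenancy-namespace>/<username>" --docker-password="<auth-token>" --docker-email="<email>"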
# Load mod_jk module
LoadModule jk_module "/etc/httpd/modules/mod_jk.so"
# Add the module (activate this line for Apache 1.3)
# AddModule mod_jk.c
# Where to find workers.properties
JkWorkersFile /etc/httpd/conf/workers.properties
# Where to put jk shared memory
JkShmFile /var/run/httpd/mod_jk.shm
# Where to put jk logs
JkLogFile /var/log/httpd/mod_jk.log
# Set the jk log level [debug/error/info]
JkLogLevel debug
JkLogStampFormat "[%a %b %d %H:%M:%S %Y]"
JkOptions +ForwardKeySize +ForwardURICompat -ForwardDirectories
JkRequestLogFormat "%w %V %T"
# Mounts
JkMount /sample* tomcat1Worker
JkMount /examples* tomcat2Worker
JkMount /date-service* okeWorker
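The referenced workers.properties is not shown above; a minimal sketch, assuming AJP (port 8009) endpoints and placeholder host names:
# workers.properties (sketch; hosts and ports are placeholders)
worker.list=tomcat1Worker,tomcat2Worker,okeWorker
worker.tomcat1Worker.type=ajp13
worker.tomcat1Worker.host=<tomcat1-host>
worker.tomcat1Worker.port=8009
worker.tomcat2Worker.type=ajp13
worker.tomcat2Worker.host=<tomcat2-host>
worker.tomcat2Worker.port=8009
worker.okeWorker.type=ajp13
worker.okeWorker.host=<oke-node-or-lb-host>
worker.okeWorker.port=8009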
Here is my configuration. Note that the file system is created on the same subnet, so no security list modification is required; otherwise we would have to modify the security lists.
Create StorageClass
Get the OCID of the mount target
Create the storage class that references the mount target
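Assuming the oracle.com/oci-fss provisioner, the storage class might look roughly like this, with a placeholder mount target OCID:
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: oci-fss
provisioner: oracle.com/oci-fss
parameters:
  # OCID of the File Storage mount target (placeholder)
  mntTargetId: ocid1.mounttarget.oc1.eu-frankfurt-1.<placeholder>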
D:\practices\kubernetes>kubectl apply -f oke-pv.yml
persistentvolume "oke-fsspv" created
pv created
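The oke-pv.yml being applied is not listed; a static persistent volume consistent with the output below could look roughly like this, with the mount target IP and export path as placeholders:
apiVersion: v1
kind: PersistentVolume
metadata:
  name: oke-fsspv
spec:
  storageClassName: oci-fss
  capacity:
    storage: 100Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  mountOptions:
    - nosuid
  nfs:
    # IP address of the FSS mount target and its export path (placeholders)
    server: <mount-target-ip>
    path: /<export-path>
    readOnly: false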
D:\practices\kubernetes>kubectl get pv -o wide
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
oke-fsspv 100Gi RWX Retain Available oci-fss 37s
D:\practices\kubernetes>
D:\practices\kubernetes>kubectl apply -f oke-pvc.yml
persistentvolumeclaim "oke-fsspvc" created
pvc created
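Similarly, oke-pvc.yml would be along these lines; volumeName pins the claim to the persistent volume created above:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: oke-fsspvc
spec:
  storageClassName: oci-fss
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
  volumeName: oke-fsspv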
D:\practices\kubernetes>kubectl get pvc -o wide
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
oke-fsspvc Bound oke-fsspv 100Gi RWX oci-fss 21s
D:\practices\kubernetes>
Verify that PVC is bound
D:\practices\kubernetes>kubectl get pvc oke-fsspvc
NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
oke-fsspvc Bound oke-fsspv 100Gi RWX oci-fss 1m
D:\practices\kubernetes>
D:\practices\kubernetes>kubectl apply -f consume-pvc.yml
service "oke-fss-pvc-svc" created
deployment.apps "oke-fss-pvc" created
D:\practices\kubernetes>
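consume-pvc.yml is not listed either; a sketch consistent with the pods and service shown below, an nginx deployment that mounts the claim at the web root and is exposed through a LoadBalancer service:
apiVersion: v1
kind: Service
metadata:
  name: oke-fss-pvc-svc
spec:
  type: LoadBalancer
  selector:
    app: oke-fss-pvc
  ports:
    - port: 80
      targetPort: 80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: oke-fss-pvc
spec:
  replicas: 3
  selector:
    matchLabels:
      app: oke-fss-pvc
  template:
    metadata:
      labels:
        app: oke-fss-pvc
    spec:
      containers:
        - name: nginx
          image: nginx
          ports:
            - containerPort: 80
          volumeMounts:
            # shared FSS-backed volume mounted at the nginx web root
            - name: fss-volume
              mountPath: /usr/share/nginx/html
      volumes:
        - name: fss-volume
          persistentVolumeClaim:
            claimName: oke-fsspvc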
D:\practices\kubernetes>kubectl get pods
NAME READY STATUS RESTARTS AGE
oke-fss-pvc-6fdf5c767b-4g6gx 1/1 Running 0 23s
oke-fss-pvc-6fdf5c767b-npsdl 1/1 Running 0 23s
oke-fss-pvc-6fdf5c767b-wrwwf 1/1 Running 0 23s
D:\practices\kubernetes>kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
oke-fss-pvc-svc LoadBalancer 10.96.149.248 129.146.147.113 80:30647/TCP 32s
Testing
There is no index.html file as of now.
Let's connect to all the pods and write some content from each one.
Let's write some content from pod oke-fss-pvc-6fdf5c767b-wrwwf.
D:\practices\kubernetes>kubectl exec -it oke-fss-pvc-6fdf5c767b-wrwwf bash
root@oke-fss-pvc-6fdf5c767b-wrwwf:/# cd /usr/share/nginx/html/
root@oke-fss-pvc-6fdf5c767b-wrwwf:/usr/share/nginx/html# echo <br/> oke-fss-pvc-6fdf5c767b-wrwwf >> index.html
bash: br/: No such file or directory
root@oke-fss-pvc-6fdf5c767b-wrwwf:/usr/share/nginx/html# echo "<br/> oke-fss-pvc-6fdf5c767b-wrwwf" >> index.html
root@oke-fss-pvc-6fdf5c767b-wrwwf:/usr/share/nginx/html#
The first echo failed because the shell interpreted the unquoted <br/> as a redirection; with quotes the write succeeds, and we are able to see the content written when accessing the service.
Let's delete all the pods.
D:\practices\kubernetes>kubectl delete pods --all
pod "oke-fss-pvc-6fdf5c767b-4g6gx" deleted
pod "oke-fss-pvc-6fdf5c767b-npsdl" deleted
pod "oke-fss-pvc-6fdf5c767b-wrwwf" deleted
D:\practices\kubernetes>kubectl get pods
NAME READY STATUS RESTARTS AGE
oke-fss-pvc-6fdf5c767b-6676n 1/1 Running 0 29s
oke-fss-pvc-6fdf5c767b-cqhwz 0/1 ContainerCreating 0 30s
oke-fss-pvc-6fdf5c767b-gmgq8 1/1 Running 0 28s
D:\practices\kubernetes>
Normally, a LoadBalancer service is created for each system that has to be exposed publicly, and each such service gets its own public IP address, which can end up being expensive.
Ingress gives a way to route requests to services based on the request host or path, reducing the number of public IP addresses to just one.
The Ingress controller listens to the Kubernetes API for Ingress resources and routes requests to the pods according to these rules.
Essentially, an Ingress controller is a system that does reverse proxying.
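For example, an Ingress resource that routes by path might look like the sketch below; the backend service names are the ones deployed later in this post, and the paths are only illustrative:
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: demo-ingress
  annotations:
    kubernetes.io/ingress.class: nginx
spec:
  rules:
    - http:
        paths:
          - path: /date-service
            backend:
              serviceName: demo-date-app-service
              servicePort: 80
          - path: /
            backend:
              serviceName: ui-app-service
              servicePort: 80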
Prerequisites
Example Backend
Download the project and build the two images.
PS D:\practices\kubernetes\app> kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml
namespace "ingress-nginx" created
configmap "nginx-configuration" created
configmap "tcp-services" created
configmap "udp-services" created
serviceaccount "nginx-ingress-serviceaccount" created
clusterrole.rbac.authorization.k8s.io "nginx-ingress-clusterrole" created
role.rbac.authorization.k8s.io "nginx-ingress-role" created
rolebinding.rbac.authorization.k8s.io "nginx-ingress-role-nisa-binding" created
clusterrolebinding.rbac.authorization.k8s.io "nginx-ingress-clusterrole-nisa-binding" created
deployment.apps "nginx-ingress-controller" created
Ingress Controller
Expose the ingress controller through a service of type LoadBalancer (cloud-generic.yml).
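cloud-generic.yml is essentially a LoadBalancer service in front of the controller pods, roughly along these lines:
kind: Service
apiVersion: v1
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  type: LoadBalancer
  externalTrafficPolicy: Local
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
  ports:
    - name: http
      port: 80
      targetPort: http
    - name: https
      port: 443
      targetPort: https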
PS D:\practices\kubernetes\app> kubectl apply -f cloud-generic.yml
service "ingress-nginx" created
Verify the service
PS D:\practices\kubernetes\app> kubectl get svc -n ingress-nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx LoadBalancer 10.96.131.117 <pending> 80:31671/TCP,443:32739/TCP 11s
PS D:\practices\kubernetes\app> kubectl get svc -n ingress-nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
ingress-nginx LoadBalancer 10.96.131.117 129.146.88.240 80:31671/TCP,443:32739/TCP 43s
D:\practices\kubernetes\svc-comm>kubectl get secret
NAME TYPE DATA AGE
default-token-sls6g kubernetes.io/service-account-token 3 23h
ocirsecret kubernetes.io/dockerconfigjson 1 10h
D:\practices\kubernetes\svc-comm>kubectl cluster-info
Kubernetes master is running at https://c4gcmzqhezd.eu-frankfurt-1.clusters.oci.oraclecloud.com:6443
KubeDNS is running at https://c4gcmzqhezd.eu-frankfurt-1.clusters.oci.oraclecloud.com:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
D:\practices\kubernetes\svc-comm>kubectl get services kube-dns --namespace=kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.96.5.5 <none> 53/UDP,53/TCP 1d
D:\practices\kubernetes\svc-comm>
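With kube-dns in place, pods can reach services by DNS name rather than by IP; for example, from a pod in the default namespace (the service name is taken from the manifests applied below, and the path is illustrative):
curl http://demo-date-app-service.default.svc.cluster.local/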
D:\practices\kubernetes\svc-comm>kubectl get nodes
NAME STATUS ROLES AGE VERSION
10.0.10.2 Ready node 2h v1.12.7
10.0.11.2 Ready node 2h v1.12.7
10.0.12.2 Ready node 2h v1.12.7
Delete existing services
D:\practices\kubernetes\svc-comm>kubectl delete -f demo-date-app-lb.yml
service "demo-date-app-service" deleted
deployment.apps "demo-date-app-deployment" deleted
D:\practices\kubernetes\svc-comm>kubectl delete -f ui-app-lb.yml
service "ui-app-service" deleted
deployment.apps "ui-app-deployment" deleted
D:\practices\kubernetes\svc-comm>kubectl apply -f demo-date-app-lb.yml
service "demo-date-app-service" created
deployment.apps "demo-date-app-deployment" created
D:\practices\kubernetes\svc-comm>kubectl apply -f ui-app-lb.yml
service "ui-app-service" created
deployment.apps "ui-app-deployment" created
Cluster Details
D:\practices\kubernetes\svc-comm>kubectl get pods
NAME READY STATUS RESTARTS AGE
demo-date-app-deployment-94877df6f-gs72q 1/1 Running 0 23s
ui-app-deployment-c94f8d95-2ffbw 1/1 Running 0 11s
D:\practices\kubernetes\svc-comm>kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
demo-date LoadBalancer 10.96.43.136 132.145.240.19 80:31283/TCP 2h
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 2h
ui-app LoadBalancer 10.96.120.3 132.145.244.52 80:31811/TCP 1h
D:\practices\kubernetes\svc-comm>