Creating Kubernetes Cluster (OKE) Using Terraform On Oracle Cloud Infrastructure (OCI)

Prerequisites

Refer this for more details.

OCI Tenancy

Refer this for details on how to gather your OCI tenancy information; it is used in the Implementation section below.

Implementation

Terraform Project

Terraform Code

We need to create the following files:

  • variable.tf
  • provider.tf
  • outputs.tf
  • datasources.tf
  • network.tf
  • cluster.tf
  • env_vars.ps1

variable.tf

# Required by the OCI Provider
variable "tenancy_ocid" {}

variable "user_ocid" {}
variable "fingerprint" {}
variable "private_key_path" {}
variable "compartment_ocid" {}

variable "ssh_public_key_file" {
  default = "~/.ssh/id_rsa.pub"
}

variable "region" {
  default = "eu-frankfurt-1"
}

# Choose an Availability Domain
variable "availability_domain" {
  default = "3"
}

variable "internet_gateway_enabled" {
  default = "true"
}

variable "worker_ol_image_name" {
  default = "Oracle-Linux-7.5"
}

variable "oke" {
  type = "map"

  default = {
    name             = "oke"
    version          = "v1.12.6"
    shape            = "VM.Standard2.2"
    nodes_per_subnet = 1
  }
}

variable "network_cidrs" {
  type = "map"

  default = {
    vcnCIDR               = "10.0.0.0/16"
    workerSubnetAD1       = "10.0.10.0/24"
    workerSubnetAD2       = "10.0.11.0/24"
    workerSubnetAD3       = "10.0.12.0/24"
    LoadBalancerSubnetAD1 = "10.0.20.0/24"
    LoadBalancerSubnetAD2 = "10.0.21.0/24"
    LoadBalancerSubnetAD3 = "10.0.22.0/24"
  }
}


provider.tf

# https://www.terraform.io/docs/configuration/providers.html
# https://www.terraform.io/docs/providers/oci/index.html
provider "oci" {
  tenancy_ocid     = "${var.tenancy_ocid}"
  user_ocid        = "${var.user_ocid}"
  fingerprint      = "${var.fingerprint}"
  private_key_path = "${var.private_key_path}"
  region           = "${var.region}"
}


outputs.tf

# https://www.terraform.io/docs/commands/output.html
# https://learn.hashicorp.com/terraform/getting-started/outputs.html
# Output the result
output "show-ads" {
  value = "${data.oci_identity_availability_domains.ADs.availability_domains}"
}
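
After terraform apply completes, this output can be printed again on demand; for example:

terraform output show-ads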


datasources.tf

# https://www.terraform.io/docs/configuration/data-sources.html
# https://www.terraform.io/docs/providers/oci/d/identity_availability_domains.html
# Gets a list of Availability Domains
data "oci_identity_availability_domains" "ADs" {
  compartment_id = "${var.compartment_ocid}"
}

# https://www.terraform.io/docs/providers/oci/d/core_images.html
data "oci_core_images" "oracle_linux_image" {
  compartment_id           = "${var.compartment_ocid}"
  operating_system         = "Oracle Linux"
  operating_system_version = "7.6"
}
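
Note that this oracle_linux_image data source isn't referenced elsewhere in the configuration (the node pool below selects its image by name through worker_ol_image_name). If you want to inspect what it returns, one option is an extra output, sketched here:

output "show-images" {
  value = "${data.oci_core_images.oracle_linux_image.images}"
}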


network.tf

# https://www.terraform.io/docs/configuration/resources.html
# https://www.terraform.io/docs/providers/oci/r/core_vcn.html
resource "oci_core_vcn" "oke_vcn" {
  #Required
  cidr_block     = "${lookup(var.network_cidrs, "vcnCIDR")}"
  compartment_id = "${var.compartment_ocid}"

  #Optional
  dns_label    = "vcn1"
  display_name = "oke-vcn"
}

# https://www.terraform.io/docs/providers/oci/r/core_security_list.html
resource "oci_core_security_list" "oke_sl" {
  #Required
  compartment_id = "${var.compartment_ocid}"
  vcn_id         = "${oci_core_vcn.oke_vcn.id}"

  egress_security_rules = [
    {
      destination = "0.0.0.0/0"
      protocol    = "all"
    },
  ]

  ingress_security_rules = [
    {
      protocol = "all"
      source   = "0.0.0.0/0"
    },
  ]

  #Optional
  display_name = "oke-sl"
}

# https://www.terraform.io/docs/providers/oci/r/core_internet_gateway.html
resource "oci_core_internet_gateway" "oke_ig" {
  #Required
  compartment_id = "${var.compartment_ocid}"
  vcn_id         = "${oci_core_vcn.oke_vcn.id}"

  #Optional
  enabled      = "${var.internet_gateway_enabled}"
  display_name = "oke-gateway"
}

# https://www.terraform.io/docs/providers/oci/r/core_route_table.html
resource "oci_core_route_table" "oke_rt" {
  #Required
  compartment_id = "${var.compartment_ocid}"
  vcn_id         = "${oci_core_vcn.oke_vcn.id}"

  route_rules {
    destination       = "0.0.0.0/0"
    network_entity_id = "${oci_core_internet_gateway.oke_ig.id}"
  }

  #Optional
  display_name = "oke-rt"
}

# https://www.terraform.io/docs/providers/oci/r/core_subnet.html
resource "oci_core_subnet" "workerSubnetAD1" {
  #Required
  cidr_block        = "${lookup(var.network_cidrs, "workerSubnetAD1")}"
  compartment_id    = "${var.compartment_ocid}"
  security_list_ids = ["${oci_core_security_list.oke_sl.id}"]
  vcn_id            = "${oci_core_vcn.oke_vcn.id}"

  #Optional
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[0], "name")}"
  dhcp_options_id     = "${oci_core_vcn.oke_vcn.default_dhcp_options_id}"
  display_name        = "workerSubnetAD1"
  dns_label           = "worker1"
  route_table_id      = "${oci_core_route_table.oke_rt.id}"
}

resource "oci_core_subnet" "workerSubnetAD2" {
  #Required
  cidr_block        = "${lookup(var.network_cidrs, "workerSubnetAD2")}"
  compartment_id    = "${var.compartment_ocid}"
  security_list_ids = ["${oci_core_security_list.oke_sl.id}"]
  vcn_id            = "${oci_core_vcn.oke_vcn.id}"

  #Optional
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[1], "name")}"
  dhcp_options_id     = "${oci_core_vcn.oke_vcn.default_dhcp_options_id}"
  display_name        = "workerSubnetAD2"
  dns_label           = "worker2"
  route_table_id      = "${oci_core_route_table.oke_rt.id}"
}

resource "oci_core_subnet" "workerSubnetAD3" {
  #Required
  cidr_block        = "${lookup(var.network_cidrs, "workerSubnetAD3")}"
  compartment_id    = "${var.compartment_ocid}"
  security_list_ids = ["${oci_core_security_list.oke_sl.id}"]
  vcn_id            = "${oci_core_vcn.oke_vcn.id}"

  #Optional
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[2], "name")}"
  dhcp_options_id     = "${oci_core_vcn.oke_vcn.default_dhcp_options_id}"
  display_name        = "workerSubnetAD3"
  dns_label           = "worker3"
  route_table_id      = "${oci_core_route_table.oke_rt.id}"
}

resource "oci_core_subnet" "LoadBalancerSubnetAD1" {
  #Required
  cidr_block        = "${lookup(var.network_cidrs, "LoadBalancerSubnetAD1")}"
  compartment_id    = "${var.compartment_ocid}"
  security_list_ids = ["${oci_core_security_list.oke_sl.id}"]
  vcn_id            = "${oci_core_vcn.oke_vcn.id}"

  #Optional
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[0], "name")}"
  dhcp_options_id     = "${oci_core_vcn.oke_vcn.default_dhcp_options_id}"
  display_name        = "LoadBalancerSubnetAD1"
  dns_label           = "loadbalancer1"
  route_table_id      = "${oci_core_route_table.oke_rt.id}"
}

resource "oci_core_subnet" "LoadBalancerSubnetAD2" {
  #Required
  cidr_block        = "${lookup(var.network_cidrs, "LoadBalancerSubnetAD2")}"
  compartment_id    = "${var.compartment_ocid}"
  security_list_ids = ["${oci_core_security_list.oke_sl.id}"]
  vcn_id            = "${oci_core_vcn.oke_vcn.id}"

  #Optional
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[1], "name")}"
  dhcp_options_id     = "${oci_core_vcn.oke_vcn.default_dhcp_options_id}"
  display_name        = "LoadBalancerSubnetAD2"
  dns_label           = "loadbalancer2"
  route_table_id      = "${oci_core_route_table.oke_rt.id}"
}


cluster.tf

# https://www.terraform.io/docs/providers/oci/r/containerengine_cluster.html
resource "oci_containerengine_cluster" "k8s_cluster" {
  #Required
  compartment_id     = "${var.compartment_ocid}"
  kubernetes_version = "${var.oke["version"]}"
  name               = "${var.oke["name"]}"
  vcn_id             = "${oci_core_vcn.oke_vcn.id}"

  #Optional
  options {
    service_lb_subnet_ids = ["${oci_core_subnet.LoadBalancerSubnetAD1.id}", "${oci_core_subnet.LoadBalancerSubnetAD2.id}"]
  }
}

# https://www.terraform.io/docs/providers/oci/r/containerengine_node_pool.html
resource "oci_containerengine_node_pool" "k8s_node_pool" {
  #Required
  cluster_id         = "${oci_containerengine_cluster.k8s_cluster.id}"
  compartment_id     = "${var.compartment_ocid}"
  kubernetes_version = "${var.oke["version"]}"
  name               = "${var.oke["name"]}"
  node_image_name    = "${var.worker_ol_image_name}"
  node_shape         = "${var.oke["shape"]}"
  subnet_ids         = ["${oci_core_subnet.workerSubnetAD1.id}", "${oci_core_subnet.workerSubnetAD2.id}", "${oci_core_subnet.workerSubnetAD3.id}"]

  quantity_per_subnet = "${var.oke["nodes_per_subnet"]}"
  ssh_public_key      = "${file(var.ssh_public_key_file)}"
}

# https://www.terraform.io/docs/providers/oci/d/containerengine_cluster_kube_config.html
data "oci_containerengine_cluster_kube_config" "cluster_kube_config" {
  cluster_id    = "${oci_containerengine_cluster.k8s_cluster.id}"
  expiration    = 2592000
  token_version = "1.0.0"
}

# https://www.terraform.io/docs/providers/local/r/file.html
resource "local_file" "kubeconfig" {
  content  = "${data.oci_containerengine_cluster_kube_config.cluster_kube_config.content}"
  filename = "${path.module}/kubeconfig"
}


env_vars.ps1

### Authentication details
$env:TF_VAR_tenancy_ocid = "ocid1.tenancy.oc1..afsafsafsafsafadsfdsafdsaf"
$env:TF_VAR_user_ocid = "ocid1.user.oc1..asdfdsafdsafdsafdsafdsafdsafds"
$env:TF_VAR_fingerprint = "35:84:df:b5:fe:6b:81:25:e5:bf:b6:e2:66:66:b8:56"
$env:TF_VAR_private_key_path = "C:\Users\nadeem\.oci\oci_api_key.pem"
 
### Region
$env:TF_VAR_region  = "eu-frankfurt-1"
 
### Compartment
$env:TF_VAR_compartment_ocid  = "ocid1.compartment.oc1..safdsafdsafdsafdsafsdfsda"

Execute env_vars.ps1

PS D:\practices\terraform\oke> .\env_vars.ps1
PS D:\practices\terraform\oke>


Execute terraform init

PS D:\practices\terraform\oke> terraform init

Initializing provider plugins...
- Checking for available provider plugins on https://releases.hashicorp.com...
- Downloading plugin for provider "oci" (3.20.0)...
- Downloading plugin for provider "local" (1.2.0)...

The following providers do not have any version constraints in configuration,
so the latest version was installed.

To prevent automatic upgrades to new major versions that may contain breaking
changes, it is recommended to add version = "..." constraints to the
corresponding provider blocks in configuration, with the constraint strings
suggested below.

* provider.local: version = "~> 1.2"
* provider.oci: version = "~> 3.20"

Terraform has been successfully initialized!

You may now begin working with Terraform. Try running "terraform plan" to see
any changes that are required for your infrastructure. All Terraform commands
should now work.

If you ever set or change modules or backend configuration for Terraform,
rerun this command to reinitialize your working directory. If you forget, other
commands will detect it and remind you to do so if necessary.
PS D:\practices\terraform\oke>
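
Following the recommendation in the init output, the provider versions can be pinned in provider.tf; a minimal sketch using the versions reported above (adjust the constraints to the versions you actually installed):

provider "oci" {
  version          = "~> 3.20"
  tenancy_ocid     = "${var.tenancy_ocid}"
  user_ocid        = "${var.user_ocid}"
  fingerprint      = "${var.fingerprint}"
  private_key_path = "${var.private_key_path}"
  region           = "${var.region}"
}

provider "local" {
  version = "~> 1.2"
}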

Execute terraform validate

PS D:\practices\terraform\oke> terraform validate
PS D:\practices\terraform\oke>

Execute terraform plan

Execute terraform apply
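
As Terraform notes later in this post when no "-out" parameter is given, the plan can optionally be saved to a file first so that apply runs exactly the plan you reviewed; a sketch:

terraform plan -out=oke.tfplan
terraform apply oke.tfplan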

Testing

PS D:\practices\terraform\oke> kubectl --kubeconfig .\kubeconfig get nodes
NAME        STATUS    ROLES     AGE       VERSION
10.0.10.2   Ready     node      5m        v1.12.6
10.0.11.2   Ready     node      55s       v1.12.6
10.0.12.2   Ready     node      5m        v1.12.6
PS D:\practices\terraform\oke>
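
To avoid passing --kubeconfig on every call, the KUBECONFIG environment variable can point at the generated file instead; a sketch for the current PowerShell session:

$env:KUBECONFIG = "$PWD\kubeconfig"
kubectl get nodes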

Clean up

Execute terraform destroy

References

Creating Bastion Compute Instance Using Terraform

Bastion host

  • Jump server to access instances in the private subnet
  • A specialized computer that is deliberately exposed on a public network
  • More on Bastion host

Design

Let's provision the following infrastructure.

Implementation

Terraform Project

Terraform Code

Create variable.tf file and add the following

variable "tenancy_ocid" {}
variable "user_ocid" {}
variable "fingerprint" {}
variable "private_key_path" {}
variable "region" {}
variable "compartment_ocid" {}
 
variable "ssh_public_key_file" {
  default = "~/.ssh/id_rsa.pub"
}
 
# Choose an Availability Domain
variable "availability_domain" {
  default = "3"
}
 
variable "internet_gateway_enabled" {
  default = "true"
}
 
variable "instance_shape" {
  default = "VM.Standard2.1"
}
 
# Defines the number of instances to deploy
variable "NumInstances" {
  default = "1"
}
 
variable "BootStrapFile" {
  default = "./cloud-init/vm.cloud-config"
}
 
variable "instance_image_ocid" {
  type = "map"
 
  default = {
    // See https://docs.us-phoenix-1.oraclecloud.com/images/
    // Oracle-provided image "Oracle-Linux-7.5-2018.10.16-0"
    us-phoenix-1 = "ocid1.image.oc1.phx.aaaaaaaahu7hv6lqbdyncgwehipwsuh3htfuxcoxbl4arcetx6hzixft366a"
 
    us-ashburn-1   = "ocid1.image.oc1.iad.aaaaaaaab5l5wv7njknupfxvyynplhsygdz67uhfaz35nsnhsk3ufclqjaea"
    eu-frankfurt-1 = "ocid1.image.oc1.eu-frankfurt-1.aaaaaaaa527xpybx2azyhcz2oyk6f4lsvokyujajo73zuxnnhcnp7p24pgva"
    uk-london-1    = "ocid1.image.oc1.uk-london-1.aaaaaaaap5kk2lbo5lj3k5ff5tl755a4cszjwd6zii7jlcp6nz3gogh54wtq"
  }
}

Create providers.tf file and add the following

provider "oci" {
  tenancy_ocid     = "${var.tenancy_ocid}"
  user_ocid        = "${var.user_ocid}"
  fingerprint      = "${var.fingerprint}"
  private_key_path = "${var.private_key_path}"
  region           = "${var.region}"
}

Refer this for more detail on Providers

Create env_vars.ps1 file and add the following (Windows)

### Authentication details
$env:TF_VAR_tenancy_ocid = "ocid1.tenancy.oc1..asfdsafsaffsafsadfdsafdsafsda"
$env:TF_VAR_user_ocid = "ocid1.user.oc1..asfdsafdsafdsafdsaf"
$env:TF_VAR_fingerprint = "5d:01:f7:11:95:96:6b:94:a1:90:ae:e8:09:59:b3:b1"
$env:TF_VAR_private_key_path = "C:\Users\nadeem\.oci\oci_api_key.pem"
### Path to your SSH public key (matches variable ssh_public_key_file)
$env:TF_VAR_ssh_public_key_file = "C:\Users\nadeem\.ssh\id_rsa.pub"
  
### Region
$env:TF_VAR_region  = "eu-frankfurt-1"
  
### Compartment
$env:TF_VAR_compartment_ocid  = "ocid1.compartment.oc1..asfdsafdsfsf"

Execute env_vars.ps1

PS D:\practices\terraform\bastion> .\env_vars.ps1
PS D:\practices\terraform\bastion> $env:TF_VAR_region
eu-frankfurt-1
PS D:\practices\terraform\bastion> $env:TF_VAR_compartment_ocid
ocid1.compartment.oc1..asfsafsafddsafdsafdsafsafdsaf
PS D:\practices\terraform\bastion> $env:TF_VAR_private_key_path
C:\Users\nadeem.ORADEV\.oci\oci_api_key.pem
PS D:\practices\terraform\bastion> $env:TF_VAR_tenancy_ocid
ocid1.tenancy.oc1..afdsafdsafdsafdsafds
PS D:\practices\terraform\bastion>

Create datasources.tf file and add the following

# Gets a list of Availability Domains
data "oci_identity_availability_domains" "ADs" {
  compartment_id = "${var.tenancy_ocid}"
}
 
data "oci_core_images" "oracle_linux_image" {
  compartment_id           = "${var.tenancy_ocid}"
  operating_system         = "Oracle Linux"
  operating_system_version = "7.6"
}
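
Note that this oracle_linux_image data source isn't referenced by compute.tf below, which selects the image from the hard-coded instance_image_ocid map. If you prefer resolving the image dynamically, a hedged sketch of the alternative source_id expression:

source_id = "${lookup(data.oci_core_images.oracle_linux_image.images[0], "id")}"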

Refer this for more details on locals

Refer this for more details on datasource oci_identity_availability_domains

Create network.tf file and add the following

resource "oci_core_vcn" "terraform_vcn" {
  #Required
  cidr_block     = "10.0.0.0/16"
  compartment_id = "${var.compartment_ocid}"
 
  #Optional
  dns_label    = "vcn1"
  display_name = "terraform-vcn"
}

Refer this for more details on resource oci_core_vcn

Add the following to network.tf

resource "oci_core_security_list" "terraform_sl" {
  #Required
  compartment_id = "${var.compartment_ocid}"
  vcn_id         = "${oci_core_vcn.terraform_vcn.id}"
 
  egress_security_rules = [
    {
      destination = "0.0.0.0/0"
      protocol    = "all"
    },
  ]
 
  ingress_security_rules = [
    {
      protocol = "6"
      source   = "0.0.0.0/0"
 
      tcp_options {
        "max" = 22
        "min" = 22
      }
    },
    {
      protocol = "6"
      source   = "0.0.0.0/0"
 
      tcp_options {
        "max" = 80
        "min" = 80
      }
    },
  ]
 
  #Optional
  display_name = "terraform-sl"
}

Refer this for more detail on oci_core_security_list

Add the following to network.tf

resource "oci_core_internet_gateway" "terraform_ig" {
  #Required
  compartment_id = "${var.compartment_ocid}"
  vcn_id         = "${oci_core_vcn.terraform_vcn.id}"
 
  #Optional
  enabled      = "${var.internet_gateway_enabled}"
  display_name = "terraform-gateway"
}

Refer this for more detail on oci_core_internet_gateway

Add the following to network.tf

resource "oci_core_route_table" "terraform_rt" {
  #Required
  compartment_id = "${var.compartment_ocid}"
  vcn_id         = "${oci_core_vcn.terraform_vcn.id}"
 
  route_rules {
    destination       = "0.0.0.0/0"
    network_entity_id = "${oci_core_internet_gateway.terraform_ig.id}"
  }
 
  #Optional
  display_name = "terraform-rt"
}

Refer this for more detail on oci_core_route_table

Add the following to network.tf

resource "oci_core_subnet" "terraform_subnet" {
  #Required
  cidr_block        = "10.0.0.0/30"
  compartment_id    = "${var.compartment_ocid}"
  security_list_ids = ["${oci_core_security_list.terraform_sl.id}"]
  vcn_id            = "${oci_core_vcn.terraform_vcn.id}"
 
  #Optional
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[var.availability_domain - 1], "name")}"
  dhcp_options_id     = "${oci_core_vcn.terraform_vcn.default_dhcp_options_id}"
  display_name        = "terraform_subnet"
  dns_label           = "terraformSubnet"
  route_table_id      = "${oci_core_route_table.terraform_rt.id}"
}

Refer this for more detail on oci_core_subnet

Here is the final network.tf

resource "oci_core_vcn" "terraform_vcn" {
  #Required
  cidr_block     = "10.0.0.0/16"
  compartment_id = "${var.compartment_ocid}"
 
  #Optional
  dns_label    = "vcn1"
  display_name = "terraform-vcn"
}
 
resource "oci_core_security_list" "terraform_sl" {
  #Required
  compartment_id = "${var.compartment_ocid}"
  vcn_id         = "${oci_core_vcn.terraform_vcn.id}"
 
  egress_security_rules = [
    {
      destination = "0.0.0.0/0"
      protocol    = "all"
    },
  ]
 
  ingress_security_rules = [
    {
      protocol = "6"
      source   = "0.0.0.0/0"
 
      tcp_options {
        "max" = 22
        "min" = 22
      }
    },
    {
      protocol = "6"
      source   = "0.0.0.0/0"
 
      tcp_options {
        "max" = 80
        "min" = 80
      }
    },
  ]
 
  #Optional
  display_name = "terraform-sl"
}
 
resource "oci_core_internet_gateway" "terraform_ig" {
  #Required
  compartment_id = "${var.compartment_ocid}"
  vcn_id         = "${oci_core_vcn.terraform_vcn.id}"
 
  #Optional
  enabled      = "${var.internet_gateway_enabled}"
  display_name = "terraform-gateway"
}
 
resource "oci_core_route_table" "terraform_rt" {
  #Required
  compartment_id = "${var.compartment_ocid}"
  vcn_id         = "${oci_core_vcn.terraform_vcn.id}"
 
  route_rules {
    destination       = "0.0.0.0/0"
    network_entity_id = "${oci_core_internet_gateway.terraform_ig.id}"
  }
 
  #Optional
  display_name = "terraform-rt"
}
 
resource "oci_core_subnet" "terraform_subnet" {
  #Required
  cidr_block        = "10.0.0.0/30"
  compartment_id    = "${var.compartment_ocid}"
  security_list_ids = ["${oci_core_security_list.terraform_sl.id}"]
  vcn_id            = "${oci_core_vcn.terraform_vcn.id}"
 
  #Optional
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[var.availability_domain - 1], "name")}"
  dhcp_options_id     = "${oci_core_vcn.terraform_vcn.default_dhcp_options_id}"
  display_name        = "terraform_subnet"
  dns_label           = "terraformSubnet"
  route_table_id      = "${oci_core_route_table.terraform_rt.id}"
}

Create compute.tf file and add the following

resource "oci_core_instance" "Bastion" {
  #Required 
  availability_domain = "${lookup(data.oci_identity_availability_domains.ADs.availability_domains[var.availability_domain - 1],"name")}"
  compartment_id      = "${var.compartment_ocid}"
  shape               = "${var.instance_shape}"
 
  #Optional
  count        = "${var.NumInstances}"
  display_name = "Bastion${count.index}"
 
  create_vnic_details {
    #Required 
    subnet_id = "${oci_core_subnet.terraform_subnet.id}"
 
    #Optional
    display_name     = "primaryvnic"
    assign_public_ip = true
    hostname_label   = "Bastion${count.index}"
    private_ip       = "10.0.0.2"
  }
 
  source_details {
    source_type = "image"
    source_id   = "${var.instance_image_ocid[var.region]}"
 
    # Apply this to set the size of the boot volume that's created for this instance.
    # Otherwise, the default boot volume size of the image is used.
    # This should only be specified when source_type is set to "image".
    #boot_volume_size_in_gbs = "60"
  }
 
  # Apply the following flag only if you wish to preserve the attached boot volume upon destroying this instance
  # Setting this and destroying the instance will result in a boot volume that should be managed outside of this config.
  # When changing this value, make sure to run 'terraform apply' so that it takes effect before the resource is destroyed.
  #preserve_boot_volume = true
 
  metadata {
    ssh_authorized_keys = "${file(var.ssh_public_key_file)}"
    user_data           = "${base64encode(file(var.BootStrapFile))}"
  }
  timeouts {
    create = "60m"
  }
}

Refer this for more detail on oci_core_instance
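
The configuration doesn't define any outputs; to surface the bastion's public IP after apply, an outputs.tf entry along these lines could be added (a sketch using the instance's exported public_ip attribute and the count splat syntax):

output "bastion_public_ips" {
  value = ["${oci_core_instance.Bastion.*.public_ip}"]
}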

Create the file cloud-init/vm.cloud-config as follows

#!/bin/bash
yum update -y

A couple more cloud-init sample files:

Execution

Let's execute terraform init

PS D:\practices\terraform\bastion> terraform init
 
Initializing provider plugins...
 
The following providers do not have any version constraints in configuration,
so the latest version was installed.
 
To prevent automatic upgrades to new major versions that may contain breaking
changes, it is recommended to add version = "..." constraints to the
corresponding provider blocks in configuration, with the constraint strings
suggested below.
 
* provider.oci: version = "~> 3.16"
 
Terraform has been successfully initialized!
 
You may now begin working with Terraform. Try running "terraform plan" to see
any changes that are required for your infrastructure. All Terraform commands
should now work.
 
If you ever set or change modules or backend configuration for Terraform,
rerun this command to reinitialize your working directory. If you forget, other
commands will detect it and remind you to do so if necessary.
PS D:\practices\terraform\bastion>

Let's execute terraform validate

PS D:\practices\terraform\bastion> terraform validate

Let's execute terraform plan

PS D:\practices\terraform\bastion> terraform plan

Output:

PS D:\practices\terraform\bastion> terraform plan
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
 
data.oci_core_images.oracle_linux_image: Refreshing state...
data.oci_identity_availability_domains.ADs: Refreshing state...
 
------------------------------------------------------------------------
 
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  + create
 <= read (data resources)
 
Terraform will perform the following actions:
 
 <= data.oci_core_images.oracle_linux_image
      id:                                                                  <computed>
      compartment_id:                                                      "ocid1.tenancy.oc1..sadfsadfdsafdsafdasdaf"
      images.#:                                                            <computed>
      operating_system:                                                    "Oracle Linux"
      operating_system_version:                                            "7.6"
 
  + oci_core_instance.Bastion
      id:                                                                  <computed>
      availability_domain:                                                 "iOTX:EU-FRANKFURT-1-AD-3"
      boot_volume_id:                                                      <computed>
      compartment_id:                                                      "ocid1.compartment.oc1..asfsafdsafsafsadf"
      create_vnic_details.#:                                               "1"
      create_vnic_details.0.assign_public_ip:                              "true"
      create_vnic_details.0.display_name:                                  "primaryvnic"
      create_vnic_details.0.freeform_tags.%:                               <computed>
      create_vnic_details.0.hostname_label:                                "Bastion0"
      create_vnic_details.0.private_ip:                                    "10.0.0.2"
      create_vnic_details.0.skip_source_dest_check:                        <computed>
      create_vnic_details.0.subnet_id:                                     "${oci_core_subnet.terraform_subnet.id}"
      display_name:                                                        "Bastion0"
      freeform_tags.%:                                                     <computed>
      image:                                                               <computed>
      ipxe_script:                                                         <computed>
      is_pv_encryption_in_transit_enabled:                                 <computed>
      launch_mode:                                                         <computed>
      launch_options.#:                                                    <computed>
      metadata.%:                                                          "2"
      metadata.ssh_authorized_keys:                                        "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC0qDKiM8iX0iz3jUXZwf2AFwKNs0UNelq6ValCYRI7nr6yyBclQDRvBP88Lyqm6Umhtu8N0qMftdjcC7rgoUXl18mDHzeEq/k2mklzT+vuzYFgbuj50mNM6YoNzucqxNIRp49Zvav2BA2oIH8XE1pZwnX7Cfu2FSxRB9Udi68nQQR6KIyzBOCmZKxvP1u+kPzJssp/wTbggHQfsRfdtJQloU10m04yHJC5uzoHOtGEVgjuXktykAzvX3bhac1NCVPc2U6xZEMTMfmb3ornYqv1w3wN49dXtmYpvIpK4HZ+ai02F4n3lN3Jy0SejJPDMoYJWsFySDas59SOxw/rD1Vp nadeem@nadeem-LAP\n"
      metadata.user_data:                                                  "IyEvYmluL2Jhc2gNCnl1bSB1cGRhdGUgLXk="
      private_ip:                                                          <computed>
      public_ip:                                                           <computed>
      region:                                                              <computed>
      shape:                                                               "VM.Standard2.1"
      source_details.#:                                                    "1"
      source_details.0.boot_volume_size_in_gbs:                            <computed>
      source_details.0.kms_key_id:                                         <computed>
      source_details.0.source_id:                                          "ocid1.image.oc1.eu-frankfurt-1.aaaaaaaa527xpybx2azyhcz2oyk6f4lsvokyujajo73zuxnnhcnp7p24pgva"
      source_details.0.source_type:                                        "image"
      state:                                                               <computed>
      subnet_id:                                                           <computed>
      time_created:                                                        <computed>
      time_maintenance_reboot_due:                                         <computed>
 
  + oci_core_internet_gateway.terraform_ig
      id:                                                                  <computed>
      compartment_id:                                                      "ocid1.compartment.oc1..asfsadfsadfsafsf"
      display_name:                                                        "terraform-gateway"
      enabled:                                                             "true"
      freeform_tags.%:                                                     <computed>
      state:                                                               <computed>
      time_created:                                                        <computed>
      time_modified:                                                       <computed>
      vcn_id:                                                              "${oci_core_vcn.terraform_vcn.id}"
 
  + oci_core_route_table.terraform_rt
      id:                                                                  <computed>
      compartment_id:                                                      "ocid1.compartment.oc1..asfsadfsadfsafsf"
      display_name:                                                        "terraform-rt"
      freeform_tags.%:                                                     <computed>
      route_rules.#:                                                       "1"
      route_rules.~1282495351.cidr_block:                                  <computed>
      route_rules.~1282495351.destination:                                 "0.0.0.0/0"
      route_rules.~1282495351.destination_type:                            <computed>
      route_rules.~1282495351.network_entity_id:                           "${oci_core_internet_gateway.terraform_ig.id}"
      state:                                                               <computed>
      time_created:                                                        <computed>
      time_modified:                                                       <computed>
      vcn_id:                                                              "${oci_core_vcn.terraform_vcn.id}"
 
  + oci_core_security_list.terraform_sl
      id:                                                                  <computed>
      compartment_id:                                                      "ocid1.compartment.oc1..asfsadfsadfsafsf"
      display_name:                                                        "terraform-sl"
      egress_security_rules.#:                                             "1"
      egress_security_rules.1582479153.destination:                        "0.0.0.0/0"
      egress_security_rules.1582479153.destination_type:                   <computed>
      egress_security_rules.1582479153.icmp_options.#:                     "0"
      egress_security_rules.1582479153.protocol:                           "all"
      egress_security_rules.1582479153.stateless:                          <computed>
      egress_security_rules.1582479153.tcp_options.#:                      "0"
      egress_security_rules.1582479153.udp_options.#:                      "0"
      freeform_tags.%:                                                     <computed>
      ingress_security_rules.#:                                            "2"
      ingress_security_rules.3861548008.icmp_options.#:                    "0"
      ingress_security_rules.3861548008.protocol:                          "6"
      ingress_security_rules.3861548008.source:                            "0.0.0.0/0"
      ingress_security_rules.3861548008.source_type:                       <computed>
      ingress_security_rules.3861548008.stateless:                         "false"
      ingress_security_rules.3861548008.tcp_options.#:                     "1"
      ingress_security_rules.3861548008.tcp_options.0.max:                 "80"
      ingress_security_rules.3861548008.tcp_options.0.min:                 "80"
      ingress_security_rules.3861548008.tcp_options.0.source_port_range.#: "0"
      ingress_security_rules.3861548008.udp_options.#:                     "0"
      ingress_security_rules.47193274.icmp_options.#:                      "0"
      ingress_security_rules.47193274.protocol:                            "6"
      ingress_security_rules.47193274.source:                              "0.0.0.0/0"
      ingress_security_rules.47193274.source_type:                         <computed>
      ingress_security_rules.47193274.stateless:                           "false"
      ingress_security_rules.47193274.tcp_options.#:                       "1"
      ingress_security_rules.47193274.tcp_options.0.max:                   "22"
      ingress_security_rules.47193274.tcp_options.0.min:                   "22"
      ingress_security_rules.47193274.tcp_options.0.source_port_range.#:   "0"
      ingress_security_rules.47193274.udp_options.#:                       "0"
      state:                                                               <computed>
      time_created:                                                        <computed>
      vcn_id:                                                              "${oci_core_vcn.terraform_vcn.id}"
 
  + oci_core_subnet.terraform_subnet
      id:                                                                  <computed>
      availability_domain:                                                 "iOTX:EU-FRANKFURT-1-AD-3"
      cidr_block:                                                          "10.0.0.0/30"
      compartment_id:                                                      "ocid1.compartment.oc1..aaaaaaaawbggxfhsizoqfpctlcubqi7hu63xiwzpxyyant625526x3zgxlga"
      dhcp_options_id:                                                     "${oci_core_vcn.terraform_vcn.default_dhcp_options_id}"
      display_name:                                                        "terraform_subnet"
      dns_label:                                                           "terraformSubnet"
      freeform_tags.%:                                                     <computed>
      prohibit_public_ip_on_vnic:                                          <computed>
      route_table_id:                                                      "${oci_core_route_table.terraform_rt.id}"
      security_list_ids.#:                                                 <computed>
      state:                                                               <computed>
      subnet_domain_name:                                                  <computed>
      time_created:                                                        <computed>
      vcn_id:                                                              "${oci_core_vcn.terraform_vcn.id}"
      virtual_router_ip:                                                   <computed>
      virtual_router_mac:                                                  <computed>
 
  + oci_core_vcn.terraform_vcn
      id:                                                                  <computed>
      cidr_block:                                                          "10.0.0.0/16"
      compartment_id:                                                      "ocid1.compartment.oc1..aaaaaaaawbggxfhsizoqfpctlcubqi7hu63xiwzpxyyant625526x3zgxlga"
      default_dhcp_options_id:                                             <computed>
      default_route_table_id:                                              <computed>
      default_security_list_id:                                            <computed>
      display_name:                                                        "terraform-vcn"
      dns_label:                                                           "vcn1"
      freeform_tags.%:                                                     <computed>
      state:                                                               <computed>
      time_created:                                                        <computed>
      vcn_domain_name:                                                     <computed>
 
 
Plan: 6 to add, 0 to change, 0 to destroy.
 
------------------------------------------------------------------------
 
Note: You didn't specify an "-out" parameter to save this plan, so Terraform
can't guarantee that exactly these actions will be performed if
"terraform apply" is subsequently run.

Let's execute terraform apply

PS D:\practices\terraform\bastion> terraform apply

Testing
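
A simple check is to SSH into the bastion as the opc user; <bastion_public_ip> below is a placeholder for the address shown in the OCI console (or the output sketched earlier):

ssh -i ~/.ssh/id_rsa opc@<bastion_public_ip>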

References

Creating VCN Using Terraform On Oracle Cloud Infrastructure (OCI)

We need the following:

  • OCI Tenancy
  • Terraform Installation
  • Terraform OCI provider
  • API SSL Key pair (Alternatively you can install OCI CLI)
  • Terraform instructions

Prerequisites

Refer this for more details.

OCI Tenancy

Refer this for details on how to gather your OCI tenancy information; it is used in the Terraform Instructions section below.

Terraform Instructions

Create a file vcn.tf, and add the following content

variable "tenancy_ocid" {}
variable "user_ocid" {}
variable "fingerprint" {}
variable "private_key_path" {}
variable "compartment_ocid" {}
variable "region" {}

provider "oci" {
  tenancy_ocid     = "${var.tenancy_ocid}"
  user_ocid        = "${var.user_ocid}"
  fingerprint      = "${var.fingerprint}"
  private_key_path = "${var.private_key_path}"
  region           = "${var.region}"
}

resource "oci_core_virtual_network" "terraform-vcn" {
  cidr_block     = "10.0.0.0/16"
  dns_label      = "vcn1"
  compartment_id = "${var.compartment_ocid}"
  display_name   = "terraform-vcn"
}
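
Optionally, an output can be added to vcn.tf to print the OCID of the new VCN after apply; a sketch:

output "vcn_ocid" {
  value = "${oci_core_virtual_network.terraform-vcn.id}"
}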


Create a file env-vars.ps1, and add the following content

### Authentication details
setx TF_VAR_tenancy_ocid ocid1.tenancy.oc1..sfdsafdsafdsafdsafdsafdsafdsafdsafdsaf
setx TF_VAR_user_ocid ocid1.user.oc1..afdsafdsafdsafdsafdsafasdfdsafdsafsafdasdfaasdfsdafdsa
setx TF_VAR_fingerprint 5d:01:f7:11:33:96:6b:84:a1:64:ae:e9:09:59:b3:a4
setx TF_VAR_private_key_path C:\Users\nadeem\.oci\oci_api_key.pem

### Region
setx TF_VAR_region eu-frankfurt-1

### Compartment
setx TF_VAR_compartment_ocid ocid1.compartment.oc1..asfdsafdsfdsafdsafsdafdsafdsafdsafsad
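
Note that setx persists the variables for future shells only; to make them visible in the current PowerShell session as well, the $env: form used elsewhere in this post can be added alongside, for example:

$env:TF_VAR_region = "eu-frankfurt-1"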

Open PowerShell and execute

PS D:\practices\terraform\vcn> .\env-vars.ps1

SUCCESS: Specified value was saved.

SUCCESS: Specified value was saved.

SUCCESS: Specified value was saved.

SUCCESS: Specified value was saved.

SUCCESS: Specified value was saved.

SUCCESS: Specified value was saved.


Execute terraform init

PS D:\practices\terraform\vcn> terraform init
Terraform initialized in an empty directory!

The directory has no Terraform configuration files. You may begin working
with Terraform immediately by creating Terraform configuration files.
PS D:\practices\terraform\vcn> terraform init

Initializing provider plugins...
- Checking for available provider plugins on https://releases.hashicorp.com...
- Downloading plugin for provider "oci" (3.16.0)...

The following providers do not have any version constraints in configuration,
so the latest version was installed.

To prevent automatic upgrades to new major versions that may contain breaking
changes, it is recommended to add version = "..." constraints to the
corresponding provider blocks in configuration, with the constraint strings
suggested below.

* provider.oci: version = "~> 3.16"

Terraform has been successfully initialized!

You may now begin working with Terraform. Try running "terraform plan" to see
any changes that are required for your infrastructure. All Terraform commands
should now work.

If you ever set or change modules or backend configuration for Terraform,
rerun this command to reinitialize your working directory. If you forget, other
commands will detect it and remind you to do so if necessary.

Execute terraform plan

PS D:\practices\terraform\vcn> terraform plan
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.


------------------------------------------------------------------------

An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  + create

Terraform will perform the following actions:

  + oci_core_virtual_network.terraform-vcn
      id:                       <computed>
      cidr_block:               "10.0.0.0/16"
      compartment_id:           "ocid1.compartment.oc1..sfadsfsaaaafasdfdsafsadfdsfdsafdsafsadfs"
      default_dhcp_options_id:  <computed>
      default_route_table_id:   <computed>
      default_security_list_id: <computed>
      display_name:             "terraform-vcn"
      dns_label:                "vcn1"
      freeform_tags.%:          <computed>
      state:                    <computed>
      time_created:             <computed>
      vcn_domain_name:          <computed>


Plan: 1 to add, 0 to change, 0 to destroy.

------------------------------------------------------------------------

Note: You didn't specify an "-out" parameter to save this plan, so Terraform
can't guarantee that exactly these actions will be performed if
"terraform apply" is subsequently run.

Execute terraform apply

PS D:\practices\terraform\vcn> terraform apply

An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  + create

Terraform will perform the following actions:

  + oci_core_virtual_network.terraform-vcn
      id:                       <computed>
      cidr_block:               "10.0.0.0/16"
      compartment_id:           "ocid1.compartment.oc1..afdsafdsafdsafdsafsadfdsafdsaf"
      default_dhcp_options_id:  <computed>
      default_route_table_id:   <computed>
      default_security_list_id: <computed>
      display_name:             "terraform-vcn"
      dns_label:                "vcn1"
      freeform_tags.%:          <computed>
      state:                    <computed>
      time_created:             <computed>
      vcn_domain_name:          <computed>


Plan: 1 to add, 0 to change, 0 to destroy.

Do you want to perform these actions?
  Terraform will perform the actions described above.
  Only 'yes' will be accepted to approve.

  Enter a value: yes

oci_core_virtual_network.terraform-vcn: Creating...
  cidr_block:               "" => "10.0.0.0/16"
  compartment_id:           "" => "ocid1.compartment.oc1..afdsafdsafdsafsadfsd"
  default_dhcp_options_id:  "" => "<computed>"
  default_route_table_id:   "" => "<computed>"
  default_security_list_id: "" => "<computed>"
  display_name:             "" => "terraform-vcn"
  dns_label:                "" => "vcn1"
  freeform_tags.%:          "" => "<computed>"
  state:                    "" => "<computed>"
  time_created:             "" => "<computed>"
  vcn_domain_name:          "" => "<computed>"
oci_core_virtual_network.terraform-vcn: Creation complete after 8s (ID: ocid1.vcn.oc1.eu-frankfurt-1.aaaaaaaayl...afsafdsfdsfsdafdsa)

Apply complete! Resources: 1 added, 0 changed, 0 destroyed.
PS D:\practices\terraform\vcn>

VCN created

Refer this for more examples

Refer the official docs for vcn for more details

Clean up

Execute terraform destroy

D:\practices\terraform\vcn>terraform destroy
oci_core_virtual_network.terraform-vcn: Refreshing state... (ID: ocid1.vcn.oc1.eu-frankfurt-1.aaaaaaaayl...asfdsafsdafsdfsda)

An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
  - destroy

Terraform will perform the following actions:

  - oci_core_virtual_network.terraform-vcn


Plan: 0 to add, 0 to change, 1 to destroy.

Do you really want to destroy all resources?
  Terraform will destroy all your managed infrastructure, as shown above.
  There is no undo. Only 'yes' will be accepted to confirm.

  Enter a value: yes

oci_core_virtual_network.terraform-vcn: Destroying... (ID: ocid1.vcn.oc1.eu-frankfurt-1.aaaaaaaayl...asfdsafsadfdsafsa)
oci_core_virtual_network.terraform-vcn: Destruction complete after 1s

Destroy complete! Resources: 1 destroyed.

D:\practices\terraform\vcn>

VCN is deleted

Terraform Console

D:\practices\terraform\vcn>terraform console
> var.region
eu-frankfurt-1
> var.fingerprint
5d:01:f7:22:95:96:6b:84:d2:60:er:e9:09:33:c3:b1
>

References

Single control-plane Kubernetes cluster with kubeadm on Oracle Cloud Infrastructure (OCI)

Not for production use; this setup is for practice purposes only.

In production, either use OKE or set up an HA cluster.

Step 1: Create Two Public Compute Instances

Follow this to create compute instances

Step 2: Configure Master Node

Disable SELinux

Temporary

[opc@k8s-master ~]$ sudo su -
[root@k8s-master ~]# setenforce 0
[root@k8s-master ~]# getenforce
Permissive

Permanent

Edit the /etc/selinux/config file and change the SELINUX=enforcing line to SELINUX=permissive

[opc@k8s-master ~]$ cat /etc/selinux/config
 
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=permissive
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected.
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted
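
The same edit can be scripted rather than done by hand; a sketch using sed:

sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config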

Disable Firewall

[root@k8s-master ~]# systemctl disable firewalld && systemctl stop firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

Add Kubernetes Repo

[root@k8s-master ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
        https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

Ensure net.bridge.bridge-nf-call-iptables is set to 1

[root@k8s-master ~]# sysctl -w net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-iptables = 1
[root@k8s-master ~]# echo "net.bridge.bridge-nf-call-iptables=1" > /etc/sysctl.d/k8s.conf

Alternatively

# cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system

Install Docker and Kubernetes

[root@k8s-master ~]# yum install -y docker-engine kubelet kubeadm kubectl kubernetes-cni

Enable the Docker and kubelet services

[root@k8s-master ~]# systemctl enable docker && systemctl start docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.

[root@k8s-master ~]# systemctl enable kubelet && systemctl start kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

Disable Swap

[root@k8s-worker ~]# swapoff -a &&  sed -i '/ swap / s/^/#/' /etc/fstab

Initialize Master

Wipe your current cluster installation (if any)

[root@k8s-master ~]# kubeadm reset -f && rm -rf /etc/kubernetes/

Reset Output

[root@k8s-master ~]# kubeadm reset -f && rm -rf /etc/kubernetes/
[reset] Reading configuration from the cluster...
[reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[preflight] Running pre-flight checks
[reset] Removing info for node "k8s-master" from the ConfigMap "kubeadm-config" in the "kube-system" Namespace
W0807 17:52:28.097752   17828 removeetcdmember.go:61] [reset] failed to remove etcd member: error syncing endpoints with etc: etcdclient: no available endpoints
.Please manually remove this etcd member using etcdctl
[reset] Stopping the kubelet service
[reset] Unmounting mounted directories in "/var/lib/kubelet"
[reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
[reset] Deleting contents of stateful directories: [/var/lib/etcd /var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes]
 
The reset process does not reset or clean up iptables rules or IPVS tables.
If you wish to reset iptables, you must do so manually.
For example:
iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
 
If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar)
to reset your system's IPVS tables.
 
The reset process does not clean your kubeconfig files and you must remove them manually.
Please, check the contents of the $HOME/.kube/config file.

Initialize cluster

[root@k8s-master ~]# kubeadm init

kubeadm init output

[init] Using Kubernetes version: v1.15.2
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.0.0.6]
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [10.0.0.6 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [10.0.0.6 127.0.0.1 ::1]
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.503514 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.15" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 9f9xuf.su3t8exlnqejevtd
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
 
Your Kubernetes control-plane has initialized successfully!
 
To start using your cluster, you need to run the following as a regular user:
 
  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config
 
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/
 
Then you can join any number of worker nodes by running the following on each as root:
 
kubeadm join 10.0.0.6:6443 --token 9f9xuf.su3t8exlnqejevtd \
    --discovery-token-ca-cert-hash sha256:6c61d0f6239d61af6de250abbce3f37122298be6a6cc27d05766128a0b844181

Copy the last two lines; we need them later.

Kubeadm has deployed all the necessary control plane components: etcd, the API server, kube-proxy, the scheduler, and the controller manager.

[root@k8s-master ~]#  export KUBECONFIG=/etc/kubernetes/admin.conf

[root@k8s-master ~]# kubectl get po -n kube-system
NAME                                 READY   STATUS    RESTARTS   AGE
coredns-5c98db65d4-n2htv             0/1     Pending   0          64s
coredns-5c98db65d4-qnvxr             0/1     Pending   0          64s
etcd-k8s-master                      1/1     Running   0          15s
kube-apiserver-k8s-master            0/1     Pending   0          2s
kube-controller-manager-k8s-master   0/1     Pending   0          9s
kube-proxy-hllq2                     1/1     Running   0          64s
kube-scheduler-k8s-master            0/1     Pending   0          9s
 
[root@k8s-master ~]# kubectl get po -n kube-system
NAME                                 READY   STATUS    RESTARTS   AGE
coredns-5c98db65d4-n2htv             0/1     Pending   0          74s
coredns-5c98db65d4-qnvxr             0/1     Pending   0          74s
etcd-k8s-master                      1/1     Running   0          25s
kube-apiserver-k8s-master            1/1     Running   0          12s
kube-controller-manager-k8s-master   1/1     Running   0          19s
kube-proxy-hllq2                     1/1     Running   0          74s
kube-scheduler-k8s-master    

[root@k8s-master ~]# kubectl describe pod coredns-5c98db65d4-n2htv -n kube-system
Name:                 coredns-5c98db65d4-n2htv
Namespace:            kube-system
Priority:             2000000000
Priority Class Name:  system-cluster-critical
Node:                 <none>
Labels:               k8s-app=kube-dns
                      pod-template-hash=5c98db65d4
Annotations:          <none>
Status:               Pending
IP:
Controlled By:        ReplicaSet/coredns-5c98db65d4
Containers:
  coredns:
    Image:       k8s.gcr.io/coredns:1.3.1
    Ports:       53/UDP, 53/TCP, 9153/TCP
    Host Ports:  0/UDP, 0/TCP, 0/TCP
    Args:
      -conf
      /etc/coredns/Corefile
    Limits:
      memory:  170Mi
    Requests:
      cpu:        100m
      memory:     70Mi
    Liveness:     http-get http://:8080/health delay=60s timeout=5s period=10s #success=1 #failure=5
    Readiness:    http-get http://:8080/health delay=0s timeout=1s period=10s #success=1 #failure=3
    Environment:  <none>
    Mounts:
      /etc/coredns from config-volume (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from coredns-token-4jpfz (ro)
Conditions:
  Type           Status
  PodScheduled   False
Volumes:
  config-volume:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      coredns
    Optional:  false
  coredns-token-4jpfz:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  coredns-token-4jpfz
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  beta.kubernetes.io/os=linux
Tolerations:     CriticalAddonsOnly
                 node-role.kubernetes.io/master:NoSchedule
                 node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason            Age                 From               Message
  ----     ------            ----                ----               -------
  Warning  FailedScheduling  6s (x19 over 106s)  default-scheduler  0/1 nodes are available: 1 node(s) had taints that the pod didn't tolerate.

[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   NotReady    master   3m57s   v1.15.2
[root@k8s-master ~]# 

The node isn't fully Ready yet because the container network (CNI) plugin isn't installed. Let's deploy the Weave Net container networking plugin; several alternatives are also available.

[root@k8s-master ~]# kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl  version | base64 | tr -d '\n')"
serviceaccount/weave-net created
clusterrole.rbac.authorization.k8s.io/weave-net created
clusterrolebinding.rbac.authorization.k8s.io/weave-net created
role.rbac.authorization.k8s.io/weave-net created
rolebinding.rbac.authorization.k8s.io/weave-net created
daemonset.extensions/weave-net created

[root@k8s-master ~]# kubectl get po -n kube-system
NAME                                 READY   STATUS              RESTARTS   AGE
coredns-5c98db65d4-n2htv             0/1     ContainerCreating   0          2m49s
coredns-5c98db65d4-qnvxr             0/1     ContainerCreating   0          2m49s
etcd-k8s-master                      1/1     Running             0          2m
kube-apiserver-k8s-master            1/1     Running             0          107s
kube-controller-manager-k8s-master   1/1     Running             0          114s
kube-proxy-hllq2                     1/1     Running             0          2m49s
kube-scheduler-k8s-master            1/1     Running             0          114s
weave-net-vl7pv                      2/2     Running             0          13s
[root@k8s-master ~]# kubectl get po -n kube-system
NAME                                 READY   STATUS    RESTARTS   AGE
coredns-5c98db65d4-n2htv             0/1     Running   0          2m52s
coredns-5c98db65d4-qnvxr             0/1     Running   0          2m52s
etcd-k8s-master                      1/1     Running   0          2m3s
kube-apiserver-k8s-master            1/1     Running   0          110s
kube-controller-manager-k8s-master   1/1     Running   0          117s
kube-proxy-hllq2                     1/1     Running   0          2m52s
kube-scheduler-k8s-master            1/1     Running   0          117s
weave-net-vl7pv                      2/2     Running   0          16s
[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE     VERSION
k8s-master   Ready    master   3m57s   v1.15.2
[root@k8s-master ~] #

Step 3 : Configure Worker Node

Disable SELinux

Temporary

[opc@k8s-worker ~]$ sudo su -
[root@k8s-worker ~]# setenforce 0
[root@k8s-worker ~]# getenforce
Permissive

Permanent

Edit the /etc/selinux/config file and change the SELINUX=enforcing line to SELINUX=permissive
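
This change can also be made non-interactively; a minimal sketch, assuming the file still contains the default SELINUX=enforcing entry:

# Switch SELinux to permissive mode persistently
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config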

[opc@k8s-worker ~]$ cat /etc/selinux/config
  
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
#     enforcing - SELinux security policy is enforced.
#     permissive - SELinux prints warnings instead of enforcing.
#     disabled - No SELinux policy is loaded.
SELINUX=permissive
# SELINUXTYPE= can take one of three values:
#     targeted - Targeted processes are protected,
#     minimum - Modification of targeted policy. Only selected processes are protected.
#     mls - Multi Level Security protection.
SELINUXTYPE=targeted

Disable Firewall

[root@k8s-worker ~]# systemctl disable firewalld && systemctl stop firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.

Add Kubernetes Repo

[root@k8s-worker ~]# cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
        https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF

[root@k8s-worker ~]# cat /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
        https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

Ensure net.bridge.bridge-nf-call-iptables is set to 1

[root@k8s-worker ~]# sysctl -w net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-iptables = 1

[root@k8s-worker ~]# echo "net.bridge.bridge-nf-call-iptables=1" > /etc/sysctl.d/k8s.conf

The following should also work

# cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system

Disable Swap

[root@k8s-worker ~]# swapoff -a &&  sed -i '/ swap / s/^/#/' /etc/fstab

Install Docker and Kubernetes

[root@k8s-worker ~]# yum install -y docker-engine  kubelet kubeadm kubectl kubernetes-cni

Enable and start the docker and kubelet services

[root@k8s-worker ~]# systemctl enable docker && systemctl start docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.

[root@k8s-worker ~]# systemctl enable kubelet && systemctl start kubelet
Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.

Join this node to the cluster using the command saved earlier

[root@k8s-worker ~]# kubeadm join 10.0.0.6:6443 --token 9f9xuf.su3t8exlnqejevtd \
>     --discovery-token-ca-cert-hash sha256:6c61d0f6239d61af6de250abbce3f37122298be6a6cc27d05766128a0b844181
[preflight] Running pre-flight checks
        [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". Please follow the guide at https://kubernetes.io/docs/setup/cri/
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.15" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Activating the kubelet service
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
 
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
 
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
 
[root@k8s-worker ~]#

Step 4 : Testing

[opc@k8s-master ~]$ sudo su -
Last login: Wed Aug  7 16:50:02 GMT 2019 on pts/0
[root@k8s-master ~]# export KUBECONFIG=/etc/kubernetes/admin.conf

[root@k8s-master ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
k8s-master   Ready    master   8h    v1.15.2
k8s-worker   Ready    <none>   96s   v1.15.2

[root@k8s-master ~]# kubectl get po --all-namespaces
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE
kube-system   coredns-5c98db65d4-n2htv             1/1     Running   0          8h
kube-system   coredns-5c98db65d4-qnvxr             1/1     Running   0          8h
kube-system   etcd-k8s-master                      1/1     Running   0          8h
kube-system   kube-apiserver-k8s-master            1/1     Running   0          8h
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          8h
kube-system   kube-proxy-hllq2                     1/1     Running   0          8h
kube-system   kube-proxy-rcm5k                     1/1     Running   0          2m3s
kube-system   kube-scheduler-k8s-master            1/1     Running   0          8h
kube-system   weave-net-p7md5                      2/2     Running   0          2m3s
kube-system   weave-net-vl7pv                      2/2     Running   0          8h

Create an nginx deployment

[root@k8s-master ~]# kubectl run nginx --image=nginx --port=80 --replicas=2
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created

[root@k8s-master ~]# kubectl get pods -o wide
NAME                     READY   STATUS    RESTARTS   AGE   IP          NODE         NOMINATED NODE   READINESS GATES
nginx-7c45b84548-9lxn7   1/1     Running   0          25s   10.44.0.1   k8s-worker   <none>           <none>
nginx-7c45b84548-z5gwx   1/1     Running   0          25s   10.44.0.2   k8s-worker   <none>           <none>

Create a service to connect to your nginx deployment.

[root@k8s-master ~]# kubectl expose deployment nginx --type NodePort
service/nginx exposed

--type=LoadBalancer is not supported with this approach.

You can manually create an OCI load balancer that routes to the cluster, or expose the node port publicly via Oracle Cloud Infrastructure's security lists.

[root@k8s-master ~]# kubectl get services
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP        8h
nginx        NodePort    10.102.90.50   <none>        80:30215/TCP   6s

[root@k8s-master ~]# NodePort=$(kubectl get svc nginx --output=jsonpath='{range.spec.ports[0]}{.nodePort}')
[root@k8s-master ~]# echo $NodePort
30215

[root@k8s-master ~]# curl 10.102.90.50:30215
curl: (7) Failed connect to 10.102.90.50:30215; Network is unreachable

curl http://${worker_node_public_ip}:${NodePort}

[root@k8s-master ~]# curl 10.0.0.7:30215
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>
 
<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>
 
<p><em>Thank you for using nginx.</em></p>
</body>
</html>

[root@k8s-master ~]# cat /etc/kubernetes/admin.conf
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1EZ3dOekUzTlRreU9Gb1hEVEk1TURnd05ERNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBSzdECis3WEd0cnhOYlhsaXVNMFdFWlErL09xRGZmMmRUUzhobUVRdk9oN3pLU3ZYbnhxRDA2YUxMcTRaVGJOaFBOT0oKWDdxejBMcmp3a2c1bzlpSGF6Y1VEp2b0daNDBkT2lCSXlVd08rWUhpMWNmU2FlblV2ZApNeEh5MWpCbHRuNkVpSFhWR29yM2NFM3dpZW1CZmlhUzR3Yy96ZWZJdExHSlFHd040ZzNqTW1CekxNNkYxOW5kCmxvVHBwdCtyMW5FbktzODhiSzBZK09oellGZXFXTGJBUGhzaTc0UzBjUFNwK0xKNEhIZnpZbjFlcXJqNTFONTMmYxK2xaWlJ4K0J0T3dlNlpIVDJoL3JiVjRiQUNUcEl0WENWZjhLbVdkNkx0R2g5LzNieQpxVVI2a0hmSHRUckw1Z0ViSmxzQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFDMmbWVJd0dDQkU3aVoKVXArTHhlMVdLanFyMytoNW12cmZWcDFYeUtrUGJOTmFvWTVYSnRxYktYWVg5Q1FSb3JTT0crRzJ4N2NxcXRIVQpaeEY5ZnJ4QjVwVjhITWRYTUdteHpoSnFXZTB5Tnd4K0gweWV3dkIxMytYS3M4Zi9iY2VmNVhXOWhaMG5mMG9PCi9iM0cxcnJ6VTNEQzhKSElVQvQU1UbFRSZ1Nhd1pzcnNaemhsRFFkVlJxeUw2aWpHOVcKQU91VkhaSFhMVmhldUZhcFZtaGdqWFhqMEFWRGVTUUtlWmk4b3hqanZwbnp0Q3NwcGUyMlVPdEVLd0oxNVBlKwpyKzkvN2haTUpkK3RyM0k5WVZPdVZnVU8yd2ljaWVKUUlxU0wrZHN5RHdjSFpkR3M2QWtabDhuOE9CZzVElGSUNBVEUtLS0tLQo=
    server: https://10.0.0.6:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJRUpjb210aTh3Q1l3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4T1RBNE1EY3hOelU1TWpoYUZ3MHlkZhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTBmcnZHVWw0L2Q5a2JxOTcKbWxjRGlFaVRYdWxrYUhHU29DbFlzN1TVVaemdhL1VYbWFiVEJENks5MUhWaktTbEdDMgo1Uzlndk5qV09yRjlWeG94TU1LVWJHQTEvSFFUd3puVk44TEE2WXFpUmdmV210dzhYbUtFUitKYmVkMkI3RllXCmh6V3MvbkZlK0lVMUNTNzRnZ1BkcHhLQ0hsei9aT2Fxb1RUWjBJblNsL2FudzMvTjh2TEV5aTVHNXhFU2NFSnoG9mOEx0ZUxHeHpLOHB3bysyek1GTUJZU2d0aXE1em5PWWhsRVREWDFWWkFucmlrcndRMwp2T1Z5Y01JZzR4dnU4cGpoeW5ZZUlIY3lnTnovZzBodE4xSHFNanZTQXlJMlMrZllKaERVRkg3NU9vVzlCeHR4CjcrUkQ2d0lEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFdJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFKSm10V3ZqMW1CTEJyWEF0MU5mT1FJbERndFc3d056d3dZcQpjaDRSQUxnOURUa2REemY5MlJqaXQrMkhCMzVDaEsyRTdDdHVYTE4xTG1QaEFxY0hXYjRPVVF3OGpYNEZoSy9rCnloand4Ly8wR2Z6VWpnQ1JwOU1FZUUHFVQ1JYQWxTcVp3NVNOWktZR2k4QytzQ09FaWwKK1JDTm5mZFd3Z0lSRG00VmVVUWZtc3VWQVoxZWk1c01CUmxlak5vSlFZRkpwMllYZjNFQXZneHFrVmhZVHFCOApkbEpxa2syWjhuMC8zOTVBVUQ3WU9uMThnb29sOUdoM3ZiNmx2blY0NW9VM3J5TDlhUGhSdUVyeUk4Slp5W6QjV3eHFVVEg1L1hVcDFIdjBwaS9IVnJKMWpoWFJWTGl0ZW55OEpvdz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcGdJQkFBS0NBUUVBMGZydkdVbDQvZDlrYnE5N21sY0RpRWlUWHVsa2FIR1NvQ2xZczRQODRvVDZocVNpCmcrYzVNVVp6Z2EvVVhtYWJUQkQ2SzkxSFZqS1NsR0MyNVM5Z3ZOaldPckY5VnhHd6blYKTjhMQTZZcWlSZ2ZXbXR3OFhtS0VSK0piZWQyQjdGWVdoeldzL25GZStJVTFDUzc0Z2dQZHB4S0NIbHovWk9hcQpvVFRaMEluU2wvYW53My9OOHZMRXlpNUc1eEVTY0VKem16cTNCWVROMWd2bXhvZjhMdGVMR3h6Szhwd28rMnpNCkZNQllTZ3RpcTV6bk9ZaGxFVERYMVZaQNSWc0eHZ1OHBqaHluWWVJSGN5Z056L2cwaHQKTjFIcU1qdlNBeUkyUytmWUpoRFVGSDc1T29XOUJ4dHg3K1JENndJREFRQUJBb0lCQVFDUTRuaEFGbllwbGc2UwpZUHNFVDYzY1IxZUlVYm82WnNNcFI5NHdYNDRLTG44K2tES2ltTlRacnliY0FScWoyR0NuWjc5MnJNUWdNZHo2CkRmtzQ25vM3huU2ZjQkNaVVdObTBZSHA1VzhqeS9RNm0rbGd2a1VseDEzRERUYUxZRU0KTllwdDJSZDRxWGxDT1llU3dwb3QrOFRoRnVBNVdla0pXa1ppRk5HN2o1OVQ5UTV1amxmM2dGU0lBOTNOV3VhQQphbmZxWXhiRW02dnRVbE84MnI0Yy9jczlINlBuV1VkR1AzbVI5aHZjU3JrRhN2RTNU5uCjBGV0M1U2MxV0RHTU5hZEwwQ3pIRTNoYTZXMmtRQU43VHBOM0hRMXloVnVybnB1NmhnLzVJZ2xsN0dDYWV3SEcKR05zR01ZQ2hBb0dCQVBrRTN1OENBYURMUENTSFN1VkVoMTRlZ1FkOElJbzd0a1BIQ2xBU0tteGxVWklkZkQrSwpHK05DSkhRMGhKZXRYYzNVY3BNUWFnRXcmxrTnFVVUdOdjloajNuMWJyaWRaUnY1RVFNCjZSR3NVb3cxNlhTME9lVXFPOGlwMGh0aVNiTURNSU05WGo0YjVST0puK3hqOHZuVGlxYTNsU245QW9HQkFOZmQKNU9nWUpwYUU1a1JvR2J0MG85Z1RoL245dXVvY1UyMEZNbm5JNTdTVkxsRDZsS3luSEZrYUF5N1hyWXh0MFFNTRT0h4UXlNc3RUdHMySDJ6alY2bXBZMDFsTSsvd2oxOE5rUXh6SkdONDg0cElrblpKbjFtCmQxUmhZbGYvUjhCaVVyYXlPaHFxd2J5OUJRTGhnMUU1UWRqNGcvWUhBb0dCQU9xZFRqVTlYSzlVQy93V2c5ZnkKY3QrWU9kVUZlOXNCV3o0TVg0a0ZOSGNnRm5SbDExUStHUHhLcXZIWk1XN0WE5GcklBegpUNjFQRlhVcUY5bjhUMWVsU3o4MEdFaVZhRlpML1hLbi8wY3BaalMzN3BhSDN5d3NXRnM4dExvY0puZkVmNGFHCjRTMy9OaHJzUllLQXdNSGtMdlBZYjI4NUFvR0JBSkkxakFhYjNQOFVNdmJnWDVWMU1raDNLaUZyOXY3OGdwRVQKOVJWQnU1YTNlSENHMk01OFBabtMU9tT0JTZUlQTWsvdkZLUFVRRTB4ZlY2R2h6aGV3MwpDV2kwZ3YrWGpWT2gzd3VmZzBTUEMxNG14RCtxb2dSTHM4Vm1WSHJ2d0VTUkZCOEVacENMV2IraGMxUjVORDErCmY1Z2E4eDVmQW9HQkFJRVcyOXlYcTRFR3ZHTEdQTXNUUUt5a2NibFhMY2tYSDdMNTlFV0dJUnpkSnVUWkYWU3Wng0amtXMEJ4NkVibyszV3RERXIrd1V3U2ZuWW1pdXplQlFjd0Z2R2dQU1RrcmtrVzFHTwpPa0xlTzlQblNNRXNlWXNFTzh3elJhRk5iQmoycVZ5WFJxWmxtMmxhSlpLSzBkNW53YXNoc3BvRwotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=


Step 5 : Connect To Cluster from Local

Copy the /etc/kubernetes/admin.conf file from the master to your local machine

In the copied file, replace the master's private IP in the server URL with its public IP. Then point the KUBECONFIG environment variable to the saved config file.
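
The server URL can also be updated without editing the file by hand; a minimal sketch, where <master_public_ip> is a placeholder for the master's public IP:

# Point the "kubernetes" cluster entry in the copied kubeconfig at the public endpoint
kubectl --kubeconfig admin.conf config set-cluster kubernetes --server=https://<master_public_ip>:6443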

D:\practices\kubernetes\conf>set KUBECONFIG=D:\practices\kubernetes\conf\admin.conf

D:\practices\kubernetes\conf>echo %KUBECONFIG%
D:\practices\kubernetes\conf\admin.conf

D:\practices\kubernetes\conf>kubectl get nodes
Unable to connect to the server: x509: certificate is valid for 10.96.0.1, 10.0.0.6, not 130.61.240.53
 
D:\practices\kubernetes\conf>kubectl --insecure-skip-tls-verify get nodes
NAME         STATUS    ROLES     AGE       VERSION
k8s-master   Ready     master    9h        v1.15.2
k8s-worker   Ready     <none>    28m       v1.15.2

References

Also See

Loadbalanced Apache With OKE Tomcat Instances

Prerequisites

Clustering Tomcat With Loadbalanced Apache

Design

  • OCI load balancer and bastion server in the public subnet, everything else in private subnets
  • Two Apache web servers under two OCI load balancers
  • Apache web servers have access to a plain Tomcat host over AJP.
  • Apache web servers interact with OKE Tomcat instances through a Kubernetes internal service over AJP.
  • There are two OKE private load balancer subnets and three OKE private worker subnets.

Implementation

Let's create two OKE load balancer subnets and three OKE worker subnets.

Let's create the security lists.

This one is for load balancer to worker node traffic.

This one is for the worker nodes.

This one is for Apache to the OKE load balancers.

Here are the subnet details.

Let's create the OKE cluster.

Let's use the bastion SSH key.

Cluster created.

Node details

PS C:\WINDOWS\system32> kubectl get nodes -o=wide
NAME         STATUS    ROLES     AGE       VERSION   EXTERNAL-IP   OS-IMAGE                  KERNEL-VERSION                   CONTAINER-RUNTIME
10.0.10.11   Ready     node      7h        v1.12.7   <none>        Oracle Linux Server 7.6   4.14.35-1902.2.0.el7uek.x86_64   docker://18.9.1
10.0.11.10   Ready     node      7h        v1.12.7   <none>        Oracle Linux Server 7.6   4.14.35-1902.2.0.el7uek.x86_64   docker://18.9.1
10.0.12.13   Ready     node      7h        v1.12.7   <none>        Oracle Linux Server 7.6   4.14.35-1902.2.0.el7uek.x86_64   docker://18.9.1
PS C:\WINDOWS\system32>

We are able to SSH into the nodes from the bastion.

[opc@bastion01-4772 ~]$ ssh [email protected]
The authenticity of host '10.0.10.11 (10.0.10.11)' can't be established.
ECDSA key fingerprint is SHA256:AjoqfhDP/v1alWhfO6wkb4zNfSo6c6PI9hTmjy6n+cI.
ECDSA key fingerprint is MD5:65:56:a3:1a:c5:b8:e0:91:ea:59:60:df:e5:31:8d:87.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.0.10.11' (ECDSA) to the list of known hosts.
Oracle Linux Server 7.6
[opc@oke-c4dczdcmyzd-nydgnrvg44d-snzovaz7x6q-0 ~]$ exit
logout
Connection to 10.0.10.11 closed.
 
[opc@bastion01-4772 ~]$ ssh [email protected]
The authenticity of host '10.0.11.10 (10.0.11.10)' can't be established.
ECDSA key fingerprint is SHA256:DFgXRe/cUha3luqY8NJKiTIfnSzndjUCxTydi2kQLjw.
ECDSA key fingerprint is MD5:96:8b:6e:08:c9:56:c9:7b:52:85:4b:1e:bf:e9:e3:5c.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.0.11.10' (ECDSA) to the list of known hosts.
Oracle Linux Server 7.6
[opc@oke-c4dczdcmyzd-nydgnrvg44d-svwagmiyvwq-0 ~]$ exit
logout
Connection to 10.0.11.10 closed.
 
[opc@bastion01-4772 ~]$ ssh [email protected]
The authenticity of host '10.0.12.13 (10.0.12.13)' can't be established.
ECDSA key fingerprint is SHA256:1dMIxDQKxZ7mBCHKHTaCNuZ1bCIQmXikvLtFx6pEMFA.
ECDSA key fingerprint is MD5:27:b3:8d:b1:53:51:1e:25:11:be:22:83:a5:09:0f:61.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.0.12.13' (ECDSA) to the list of known hosts.
Oracle Linux Server 7.6
[opc@oke-c4dczdcmyzd-nydgnrvg44d-sa6zgwm6lrq-0 ~]$ exit
logout
Connection to 10.0.12.13 closed.

Let's create a secret to be used in the deployment descriptor.

D:\practices\kubernetes\demo>docker login fra.ocir.io
Authenticating with existing credentials...
Login Succeeded

D:\practices\kubernetes\demo>kubectl create secret docker-registry ociregsecret --docker-server=fra.ocir.io --docker-username=demo/oracleidentitycloudservice/[email protected] --docker-email=[email protected] --docker-password=z{}ssfd-6LTIgtNGMG
secret "ociregsecret" created

D:\practices\kubernetes\demo>docker tag mnadeem/date-service:latest fra.ocir.io/demo/docker-registry/date-service:1.0

D:\practices\kubernetes\demo>docker push  fra.ocir.io/demo/docker-registry/date-service:1.0
The push refers to repository [fra.ocir.io/demo/docker-registry/date-service]
49932b8d1844: Pushed
fdb37f3a3522: Pushed
b7d850202de0: Pushed
d1d0b1719b96: Pushed
48988bb7b861: Pushed
edd61588d126: Pushed
9b9b7f3d56a0: Pushed
f1b5933fe4b5: Pushed
1.0: digest: sha256:2922633eec7e81183983e8f3654fa847da6feb4a1fafdf1b750c62d77714e62f size: 1989

apiVersion: v1
kind: Service
metadata:
  name: demo-date
  annotations:
   service.beta.kubernetes.io/oci-load-balancer-internal: "true"
spec:
  type: LoadBalancer
  ports:
  - name: http
    port: 8080
    targetPort: 8080
  - name: ajp
    port: 8009
    targetPort: 8009
  selector:
    app: demo-date
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-date
spec:
  selector:
    matchLabels:
     app: demo-date
  replicas: 1
  template:
    metadata:
      labels:
        app: demo-date
    spec:
      containers:
      - name: demo-date 
        image: fra.ocir.io/demo/docker-registry/date-service:1.0
        ports:
        - containerPort: 8080
          name: http
        - containerPort: 8009
          name: ajp      
      imagePullSecrets:
      - name: ociregsecret

The following would work as well, exposing only the AJP port:

apiVersion: v1
kind: Service
metadata:
  name: demo-date
  annotations:
   service.beta.kubernetes.io/oci-load-balancer-internal: "true"
spec:
  type: LoadBalancer
  ports:
  - port: 8009
    targetPort: 8009
  selector:
    app: demo-date
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-date
spec:
  selector:
    matchLabels:
     app: demo-date
  replicas: 1
  template:
    metadata:
      labels:
        app: demo-date
    spec:
      containers:
      - name: demo-date 
        image: fra.ocir.io/demo/docker-registry/date-service:1.0
        ports:
        - containerPort: 8080
          name: http
      imagePullSecrets:
      - name: ociregsecret 

D:\practices\kubernetes\demo>kubectl apply -f date-service.yml
service "demo-date" created
deployment.apps "demo-date" created

D:\practices\kubernetes\demo>kubectl get pods
NAME                         READY     STATUS    RESTARTS   AGE
demo-date-74fc5bf665-2gfwf   1/1       Running   0          16s

D:\practices\kubernetes\demo>kubectl get services
NAME         TYPE           CLUSTER-IP     EXTERNAL-IP   PORT(S)                         AGE
demo-date    LoadBalancer   10.96.226.54   10.0.21.4     8080:30386/TCP,8009:30569/TCP   2h
kubernetes   ClusterIP      10.96.0.1      <none>        443/TCP                         12h
 
D:\practices\kubernetes\demo>

An internal load balancer is automatically created for the service.

Add the following to the Apache instances (workers.properties under /etc/httpd/conf).

Here is the file:

workers.apache_log=/var/log/httpd
 
worker.list=tomcat1Worker,tomcat2Worker,okeWorker
 
worker.tomcat1Worker.type=ajp13
worker.tomcat1Worker.host=10.0.2.2
worker.tomcat1Worker.port=8009
worker.tomcat1Worker.socket_keepalive=1
worker.tomcat1Worker.connection_pool_timeout=300
 
worker.tomcat2Worker.type=ajp13
worker.tomcat2Worker.host=10.0.2.3
worker.tomcat2Worker.port=8009
worker.tomcat2Worker.socket_keepalive=1
worker.tomcat2Worker.connection_pool_timeout=300
 
worker.okeWorker.type=ajp13
worker.okeWorker.host=10.0.21.4
worker.okeWorker.port=8009
worker.okeWorker.socket_keepalive=1
worker.okeWorker.connection_pool_timeout=300

Update the /etc/httpd/conf.modules.d/mod_jk.conf

mod_jk.conf

# Load mod_jk module
LoadModule    jk_module "/etc/httpd/modules/mod_jk.so"
# Add the module (activate this line for Apache 1.3)
# AddModule     mod_jk.c
# Where to find workers.properties
JkWorkersFile /etc/httpd/conf/workers.properties
# Where to put jk shared memory
JkShmFile     /var/run/httpd/mod_jk.shm
# Where to put jk logs
JkLogFile     /var/log/httpd/mod_jk.log
# Set the jk log level [debug/error/info]
JkLogLevel    debug
 
JkLogStampFormat "[%a %b %d %H:%M:%S %Y]"
JkOptions +ForwardKeySize +ForwardURICompat -ForwardDirectories
JkRequestLogFormat "%w %V %T"
 
# Mounts
JkMount  /sample* tomcat1Worker
JkMount  /examples* tomcat2Worker
 
JkMount  /date-service* okeWorker

Things are working like a charm.
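
For example, the OKE-backed path can be verified end to end through Apache; a minimal sketch, where <apache_host> is a placeholder for one of the Apache web servers (or the OCI load balancer in front of them):

# Request is forwarded by mod_jk over AJP to the OKE internal load balancer
curl http://<apache_host>/date-service/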

Final subnets

Final Security lists

References

Creating Private (Internal) Load Balancer in OKE

Just add the following annotation

service.beta.kubernetes.io/oci-load-balancer-internal: "true"

Here is an example

kind: Service
apiVersion: v1
metadata:
 name: sample-app-internal-svc
 annotations:
   service.beta.kubernetes.io/oci-load-balancer-internal: "true"
spec:
 selector:
   app: sample-app
 ports:
 - protocol: TCP
   targetPort: 8080
   port: 80
 type: LoadBalancer

References

File Storage Service With Kubernetes (OKE) on Oracle Cloud Infrastructure (OCI)

Design

Implementation

Create File System

Follow the instructions here to create one

Here is my configuration. Note that the file system is being created in the same subnet, so a security list modification is not required; otherwise we would have to modify the security lists.

Create StorageClass

Get the OCID of the mount target
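
If the OCI CLI is configured, the mount target OCID can also be looked up from the command line; a minimal sketch, with placeholder values for the compartment OCID and availability domain:

oci fs mount-target list --compartment-id <compartment_ocid> --availability-domain <availability_domain_name>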

Create the storage class that references the mount target

storageClass.yml

kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: oci-fss
provisioner: oracle.com/oci-fss
parameters:
  mntTargetId: ocid1.mounttarget.oc1.phx.aaaaaa4np2snz72kobuhqllqojxwiotqnb4c2ylefuzqaaaa

D:\practices\kubernetes>kubectl apply -f storageClass.yml
storageclass.storage.k8s.io "oci-fss" created
 
D:\practices\kubernetes>kubectl get storageclass
NAME            PROVISIONER          AGE
oci (default)   oracle.com/oci       9d
oci-fss         oracle.com/oci-fss   14s
 
D:\practices\kubernetes>

Create a Persistent Volume (PV)

Make a note of the mount target IP address and the export path.
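
The export can be verified from any host in the subnet with the NFS client utilities installed; a minimal sketch, using the mount target IP from the PV definition below:

# Lists the exports offered by the FSS mount target
showmount -e 10.0.2.10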

oke-pv.yml

apiVersion: v1
kind: PersistentVolume
metadata:
 name: oke-fsspv
spec:
 storageClassName: oci-fss
 capacity:
  storage: 100Gi
 accessModes:
  - ReadWriteMany
 mountOptions:
  - nosuid
 nfs:
  server: 10.0.2.10
  path: "/oke-file-system"
  readOnly: false

D:\practices\kubernetes>kubectl apply -f oke-pv.yml
persistentvolume "oke-fsspv" created

pv created

D:\practices\kubernetes>kubectl get pv -o wide
NAME        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM     STORAGECLASS   REASON    AGE
oke-fsspv   100Gi      RWX            Retain           Available             oci-fss                  37s
 
D:\practices\kubernetes>

Create a Persistent Volume Claim (PVC)

oke-pvc.yml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
 name: oke-fsspvc
spec:
 storageClassName: oci-fss
 accessModes:
 - ReadWriteMany
 resources:
  requests:
    storage: 100Gi
 volumeName: oke-fsspv

D:\practices\kubernetes>kubectl apply -f oke-pvc.yml
persistentvolumeclaim "oke-fsspvc" created

pvc created

D:\practices\kubernetes>kubectl get pvc -o wide
NAME         STATUS    VOLUME      CAPACITY   ACCESS MODES   STORAGECLASS   AGE
oke-fsspvc   Bound     oke-fsspv   100Gi      RWX            oci-fss        21s
 
D:\practices\kubernetes>

Verify that PVC is bound

D:\practices\kubernetes>kubectl get pvc oke-fsspvc
NAME         STATUS    VOLUME      CAPACITY   ACCESS MODES   STORAGECLASS   AGE
oke-fsspvc   Bound     oke-fsspv   100Gi      RWX            oci-fss        1m
 
D:\practices\kubernetes>

Consume the PVC

consume-pvc.yml

apiVersion: v1
kind: Service
metadata:
  name: oke-fss-pvc-svc
spec:
  type: LoadBalancer
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: oke-fss-pvc
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: oke-fss-pvc
  name: oke-fss-pvc
spec:
  selector:
    matchLabels:
      app: oke-fss-pvc
  replicas: 3
  template:
    metadata:
      labels:
        app: oke-fss-pvc
    spec:
      containers:
      - name: oke-fss-pvc 
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - name: nfs-mount
          mountPath: "/usr/share/nginx/html/"
      volumes:
      - name: nfs-mount
        persistentVolumeClaim:
          claimName: oke-fsspvc
          readOnly: false

D:\practices\kubernetes>kubectl apply -f consume-pvc.yml
service "oke-fss-pvc-svc" created
deployment.apps "oke-fss-pvc" created
 
D:\practices\kubernetes>

D:\practices\kubernetes>kubectl get pods
NAME                               READY     STATUS    RESTARTS   AGE
oke-fss-pvc-6fdf5c767b-4g6gx       1/1       Running   0          23s
oke-fss-pvc-6fdf5c767b-npsdl       1/1       Running   0          23s
oke-fss-pvc-6fdf5c767b-wrwwf       1/1       Running   0          23s

D:\practices\kubernetes>kubectl get services
NAME                  TYPE           CLUSTER-IP      EXTERNAL-IP                                    PORT(S)        AGE
oke-fss-pvc-svc       LoadBalancer   10.96.149.248   129.146.147.113                                80:30647/TCP   32s

Testing

There is no index.html file as of now.

Let's connect to each of the pods and write some content from each one.

Let's write from pod oke-fss-pvc-6fdf5c767b-4g6gx:

D:\practices\kubernetes>kubectl exec -it oke-fss-pvc-6fdf5c767b-4g6gx bash
root@oke-fss-pvc-6fdf5c767b-4g6gx:/# cd /usr/share/nginx/html/
root@oke-fss-pvc-6fdf5c767b-4g6gx:/usr/share/nginx/html# echo oke-fss-pvc-6fdf5c767b-4g6gx >> index.html
root@oke-fss-pvc-6fdf5c767b-4g6gx:/usr/share/nginx/html#

We are able to see the content written.

Let's write some content from pod oke-fss-pvc-6fdf5c767b-npsdl:

D:\practices\kubernetes>kubectl exec -it oke-fss-pvc-6fdf5c767b-npsdl bash
root@oke-fss-pvc-6fdf5c767b-npsdl:/# cd /usr/share/nginx/html/
root@oke-fss-pvc-6fdf5c767b-npsdl:/usr/share/nginx/html# echo oke-fss-pvc-6fdf5c767b-npsdl >> index.html
root@oke-fss-pvc-6fdf5c767b-npsdl:/usr/share/nginx/html#

We are able to see the content written.

Let's write some content from pod oke-fss-pvc-6fdf5c767b-wrwwf:

D:\practices\kubernetes>kubectl exec -it oke-fss-pvc-6fdf5c767b-wrwwf bash
root@oke-fss-pvc-6fdf5c767b-wrwwf:/# cd /usr/share/nginx/html/
root@oke-fss-pvc-6fdf5c767b-wrwwf:/usr/share/nginx/html# echo <br/> oke-fss-pvc-6fdf5c767b-wrwwf >> index.html
bash: br/: No such file or directory
root@oke-fss-pvc-6fdf5c767b-wrwwf:/usr/share/nginx/html# echo "<br/> oke-fss-pvc-6fdf5c767b-wrwwf" >> index.html
root@oke-fss-pvc-6fdf5c767b-wrwwf:/usr/share/nginx/html#

We are able to see the content written.

Let's delete all the pods:

D:\practices\kubernetes>kubectl delete pods --all
pod "oke-fss-pvc-6fdf5c767b-4g6gx" deleted
pod "oke-fss-pvc-6fdf5c767b-npsdl" deleted
pod "oke-fss-pvc-6fdf5c767b-wrwwf" deleted

D:\practices\kubernetes>kubectl get pods
NAME                               READY     STATUS              RESTARTS   AGE
oke-fss-pvc-6fdf5c767b-6676n       1/1       Running             0          29s
oke-fss-pvc-6fdf5c767b-cqhwz       0/1       ContainerCreating   0          30s
oke-fss-pvc-6fdf5c767b-gmgq8       1/1       Running             0          28s
 
D:\practices\kubernetes>

The data is persisted even though the original pods were deleted and recreated.
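
This can also be confirmed from outside the cluster; a minimal sketch, using the external IP of the oke-fss-pvc-svc service shown earlier:

# nginx serves the index.html written to the shared file system
curl http://129.146.147.113/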

Also See

OKE Ingress Controller

Benefits

  • Normally, a LoadBalancer service is created for each application that has to be exposed publicly, and each one gets its own public IP address, which can end up being expensive.
  • Ingress gives a way to route requests to services based on the request host or path, reducing the number of public IP addresses to just one.
  • The Ingress Controller watches the Kubernetes API for Ingress resources and routes requests to the pods according to these rules.
  • Essentially, an Ingress Controller is a system that does reverse proxying.

Prerequisites

Example Backend

Download the project and build the two images:

date-service> mvn clean package docker:build
ui> mvn clean package docker:build

D:\practices\kubernetes\app>docker tag mnadeem/date-service:latest phx.ocir.io/demo-tenancy/docker-registry/date-service:1.0

PS D:\practices\kubernetes\app> docker push phx.ocir.io/demo-tenancy/docker-registry/date-service:1.0
The push refers to repository [phx.ocir.io/demo-tenancy/docker-registry/date-service]
49932b8d1844: Pushed
fdb37f3a3522: Pushed
b7d850202de0: Pushed
d1d0b1719b96: Pushed
48988bb7b861: Pushed
edd61588d126: Pushed
9b9b7f3d56a0: Pushed
f1b5933fe4b5: Pushed
1.0: digest: sha256:2922633eec7e81183983e8f3654fa847da6feb4a1fafdf1b750c62d77714e62f size: 1989

demo-date-svc-ngx.yml

apiVersion: v1
kind: Service
metadata:
  name: demo-date-svc
spec:
  type: ClusterIP
  ports:
  - port: 8088
    protocol: TCP
    targetPort: 8080
  selector:
    app: demo-date
   
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-date
spec:
  selector:
    matchLabels:
     app: demo-date
  replicas: 2
  template:
    metadata:
      labels:
        app: demo-date
    spec:
      containers:
      - name: demo-date  
        image: phx.ocir.io/demo-tenancy/docker-registry/date-service:1.0
        ports:
        - containerPort: 8080
      imagePullSecrets:
      - name: ocirsecret

PS D:\practices\kubernetes\app> kubectl apply -f .\demo-date-svc-ngx.yml
service "demo-date-svc" created
deployment.apps "demo-date" created

Creating Resources

Kubernetes RBAC

Grant the Kubernetes RBAC cluster-admin clusterrole to the user

kubectl create clusterrolebinding <my-cluster-admin-binding> --clusterrole=cluster-admin --user=<user_OCID>

where <my-cluster-admin-binding> is a string of your choice to be used as the name for the binding between the user and the Kubernetes RBAC cluster-admin clusterrole

PS D:\practices\kubernetes\app> kubectl create clusterrolebinding mnadeem_clst_adm --clusterrole=cluster-admin --user=ocid1.user.oc1..aaaaaaaaq3g4b42vj33h2p4iibye7xlpu7jnzzeosuzuwxakktbzcl5uruxa
clusterrolebinding.rbac.authorization.k8s.io "mnadeem_clst_adm" created
PS D:\practices\kubernetes\app>

Create mandatory resources

PS D:\practices\kubernetes\app> kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml
namespace "ingress-nginx" created
configmap "nginx-configuration" created
configmap "tcp-services" created
configmap "udp-services" created
serviceaccount "nginx-ingress-serviceaccount" created
clusterrole.rbac.authorization.k8s.io "nginx-ingress-clusterrole" created
role.rbac.authorization.k8s.io "nginx-ingress-role" created
rolebinding.rbac.authorization.k8s.io "nginx-ingress-role-nisa-binding" created
clusterrolebinding.rbac.authorization.k8s.io "nginx-ingress-clusterrole-nisa-binding" created
deployment.apps "nginx-ingress-controller" created

Ingress Controller

Expose the ingress controller service as a load balancer service:

cloud-generic.yaml

kind: Service
apiVersion: v1
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  type: LoadBalancer
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
  ports:
    - name: http
      port: 80
      targetPort: http
    - name: https
      port: 443
      targetPort: https

PS D:\practices\kubernetes\app>  kubectl apply -f cloud-generic.yml
service "ingress-nginx" created

Verify the service

PS D:\practices\kubernetes\app>  kubectl get svc -n ingress-nginx
NAME            TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx   LoadBalancer   10.96.131.117   <pending>     80:31671/TCP,443:32739/TCP   11s
 
 
PS D:\practices\kubernetes\app>  kubectl get svc -n ingress-nginx
NAME            TYPE           CLUSTER-IP      EXTERNAL-IP      PORT(S)                      AGE
ingress-nginx   LoadBalancer   10.96.131.117   129.146.88.240   80:31671/TCP,443:32739/TCP   43s

PS D:\practices\kubernetes\app> kubectl get svc --all-namespaces
NAMESPACE       NAME                   TYPE           CLUSTER-IP      EXTERNAL-IP       PORT(S)                      AGE
default         demo-date-svc          ClusterIP      10.96.230.80    <none>            8088/TCP                     18s
default         kubernetes             ClusterIP      10.96.0.1       <none>            443/TCP                      1d
default         qa-crm                 LoadBalancer   10.96.166.129   129.146.158.251   80:31091/TCP                 22h
ingress-nginx   ingress-nginx          LoadBalancer   10.96.131.117   129.146.88.240    80:31671/TCP,443:32739/TCP   15m
kube-system     kube-dns               ClusterIP      10.96.5.5       <none>            53/UDP,53/TCP                1d
kube-system     kubernetes-dashboard   ClusterIP      10.96.90.38     <none>            443/TCP                      1d
kube-system     tiller-deploy          ClusterIP      10.96.134.35    <none>            44134/TCP                    1d

Bind controller with backend

Create TLS Secret
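
The tls-secret referenced by the Ingress below can be created, for example, from a self-signed certificate; a minimal sketch:

# Generate a throwaway self-signed certificate and key
openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN=demo-date"
# Create the Kubernetes TLS secret from them
kubectl create secret tls tls-secret --key tls.key --cert tls.crt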

Bind Service

ingress.yml

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: demo-date-ing
  annotations:
    kubernetes.io/ingress.class: "nginx"
spec:
  tls:
  - secretName: tls-secret
  rules:
  - http:
      paths:
      - backend:
          serviceName: demo-date-svc
          servicePort: 8088

PS D:\practices\kubernetes\app> kubectl apply -f .\ingress.yml
ingress.extensions "demo-date-ing" created
PS D:\practices\kubernetes\app>


Another way to define the Ingress, using path-based routing:

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: demo-date-ing
  annotations:
    ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - http:
      paths:
        - path: /date-service
          backend:
            serviceName: demo-date-svc
            servicePort: 8088
        - path: /ui
          backend:
            serviceName: ui-svc
            servicePort: 8088

Testing

$ curl -I http://129.146.88.240
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
  0   172    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0HTTP/1.1 308 Permanent Redirect
Server: nginx/1.15.10
Date: Sun, 07 Jul 2019 12:23:57 GMT
Content-Type: text/html
Content-Length: 172
Connection: keep-alive
Location: https://129.146.88.240/

$ curl -ikL http://129.146.88.240/date-service
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100   172  100   172    0     0    297      0 --:--:-- --:--:-- --:--:--   305
  0     0    0     0    0     0      0      0 --:--:--  0:00:04 --:--:--     0
100   110  100   110    0     0     21      0  0:00:05  0:00:05 --:--:--     0HTTP/1.1 308 Permanent Redirect
Server: nginx/1.15.10
Date: Sun, 07 Jul 2019 12:25:54 GMT
Content-Type: text/html
Content-Length: 172
Connection: keep-alive
Location: https://129.146.88.240/date-service
 
HTTP/2 302
server: nginx/1.15.10
date: Sun, 07 Jul 2019 12:25:58 GMT
location: /date-service/
strict-transport-security: max-age=15724800; includeSubDomains
 
HTTP/2 200
server: nginx/1.15.10
date: Sun, 07 Jul 2019 12:25:58 GMT
content-type: text/html;charset=ISO-8859-1
content-length: 110
set-cookie: JSESSIONID=1FEB33B8AC00C7AA45B6EF8EA1938429; Path=/date-service; HttpOnly
strict-transport-security: max-age=15724800; includeSubDomains
 
 
 
 
 
 
demo-date-c48f6f44d-tz6vc Sun Jul 07 12:25:58 GMT 2019  ;  /usr/local/tomcat/webapps/date-service

Editing

PS D:\practices\kubernetes\app> kubectl get ingress
NAME            HOSTS     ADDRESS          PORTS     AGE
demo-date-ing   *         129.146.88.240   80        2h

D:\practices\kubernetes\app> kubectl edit ingress demo-date-ing
   ingress.extensions "demo-date-ing" edited


PS D:\practices\kubernetes\app> kubectl describe ingress demo-date-ing
Name:             demo-date-ing
Namespace:        default
Address:          129.146.88.240
Default backend:  default-http-backend:80 (<none>)
Rules:
  Host  Path  Backends
  ----  ----  --------
  *
        /date-service   demo-date-svc:8088 (<none>)
        /abc            demo-date-svc:8088 (<none>)
Annotations:
  ingress.kubernetes.io/rewrite-target:              /
  kubectl.kubernetes.io/last-applied-configuration:  {"apiVersion":"extensions/v1beta1","kind":"Ingress","metadata":{"annotations":{"ingress.kubernetes.io/rewrite-target":"/"},"name":"demo-date-ing","namespace":"default"},"spec":{"rules":[{"http":{"paths":[{"backend":{"serviceName":"demo-date-svc","servicePort":8088},"path":"/date-service"},{"backend":{"serviceName":"demo-date-svc","servicePort":8088},"path":"/pqr"}]}}]}}
 
Events:
  Type    Reason  Age               From                      Message
  ----    ------  ----              ----                      -------
  Normal  CREATE  2h                nginx-ingress-controller  Ingress default/demo-date-ing
  Normal  UPDATE  22s (x4 over 2h)  nginx-ingress-controller  Ingress default/demo-date-ing
PS D:\practices\kubernetes\app>

Connecting Kubernetes Applications (Tomcat) With Services On Oracle Cloud Infrastructure (OCI)

This will demonstrate the following

  • Communication between two services.
  • Configuring services using environment entries.

Design

Create Docker images

Download the project and execute the following commands on both projects to generate the docker images.

date-service> mvn clean package docker:build
ui> mvn clean package docker:build

Upload Docker Images

Follow the process to upload the Docker images to the OCI Registry (OCIR).
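
The upload boils down to tagging each local image for OCIR and pushing it; a minimal sketch, assuming the fra.ocir.io region, the demotenancy namespace, and a local image tagged mnadeem/date-service:latest as in the earlier sections (the ui image is handled the same way):

docker login fra.ocir.io
docker tag mnadeem/date-service:latest fra.ocir.io/demotenancy/docker-registry/demo-date-service:1.0
docker push fra.ocir.io/demotenancy/docker-registry/demo-date-service:1.0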

Images uploaded to registry

Create OKE Cluster

Follow this process

Deploy the Services

Create secret

kubectl create secret docker-registry ocirsecret --docker-server=fra.ocir.io --docker-username=demo-tenancy/[email protected] --docker-password=sfdsfdf.(sfs[POx --docker-email=[email protected]


D:\practices\kubernetes\svc-comm>kubectl get secret
NAME                  TYPE                                  DATA      AGE
default-token-sls6g   kubernetes.io/service-account-token   3         23h
ocirsecret            kubernetes.io/dockerconfigjson        1         10h


D:\practices\kubernetes\svc-comm>kubectl describe secret  ocirsecret
Name:         ocirsecret
Namespace:    default
Labels:       <none>
Annotations:  <none>
 
Type:  kubernetes.io/dockerconfigjson
 
Data
====
.dockerconfigjson:  240 bytes


Create ConfigMap

ui.xml

<?xml version="1.0" encoding="UTF-8"?>
<Context path="/ui" debug="0" reloadable="false" useHttpOnly="true"
    clearReferencesHttpClientKeepAliveThread="true"
    clearReferencesStopThreads="true" clearReferencesStopTimerThreads="true">
 
 
    <Environment name="app" type="java.lang.String"
        value="${catalina.base}/webapps/echo-service" />
 
    <Environment name="url" type="java.lang.String"
        value="http://demo-date/date-service" />
 
</Context>

D:\workspaces\kubernetes\docker-compose\ui\src\main\webapp\META-INF>kubectl create configmap ui-config --from-file=ui.xml


D:\workspaces\kubernetes\docker-compose\ui\src\main\webapp\META-INF>kubectl get configmap
NAME        DATA      AGE
ui-config   1         1h


D:\workspaces\kubernetes\docker-compose\ui\src\main\webapp\META-INF>kubectl describe configmap ui-config
Name:         ui-config
Namespace:    default
Labels:       <none>
Annotations:  <none>
 
Data
====
ui.xml:
----
<?xml version="1.0" encoding="UTF-8"?>
<Context path="/ui" debug="0" reloadable="false" useHttpOnly="true"
  clearReferencesHttpClientKeepAliveThread="true"
  clearReferencesStopThreads="true" clearReferencesStopTimerThreads="true">
 
 
  <Environment name="app" type="java.lang.String"
    value="${catalina.base}/webapps/echo-service" />
 
  <Environment name="url" type="java.lang.String"
    value="http://demo-date/date-service" />
 
</Context>
Events:  <none>
 
D:\workspaces\kubernetes\docker-compose\ui\src\main\webapp\META-INF>


Cluster detail

D:\practices\kubernetes\svc-comm>kubectl cluster-info
Kubernetes master is running at https://c4gcmzqhezd.eu-frankfurt-1.clusters.oci.oraclecloud.com:6443
KubeDNS is running at https://c4gcmzqhezd.eu-frankfurt-1.clusters.oci.oraclecloud.com:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
 
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.


D:\practices\kubernetes\svc-comm>kubectl get services kube-dns --namespace=kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)         AGE
kube-dns   ClusterIP   10.96.5.5    <none>        53/UDP,53/TCP   1d
 
D:\practices\kubernetes\svc-comm>


D:\practices\kubernetes\svc-comm>kubectl get nodes
NAME        STATUS    ROLES     AGE       VERSION
10.0.10.2   Ready     node      2h        v1.12.7
10.0.11.2   Ready     node      2h        v1.12.7
10.0.12.2   Ready     node      2h        v1.12.7


Delete existing services


D:\practices\kubernetes\svc-comm>kubectl delete -f demo-date-app-lb.yml
service "demo-date-app-service" deleted
deployment.apps "demo-date-app-deployment" deleted
 
D:\practices\kubernetes\svc-comm>kubectl delete -f ui-app-lb.yml
service "ui-app-service" deleted
deployment.apps "ui-app-deployment" deleted

Create services

demo-date-app-lb.yml

apiVersion: v1
kind: Service
metadata:
  name: demo-date
spec:
  type: LoadBalancer
  ports:
  - port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: demo-date
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-date
spec:
  selector:
    matchLabels:
     app: demo-date
  replicas: 1
  template:
    metadata:
      labels:
        app: demo-date
    spec:
      containers:
      - name: demo-date  
        image: fra.ocir.io/demotenancy/docker-registry/demo-date-service:1.0
        ports:
        - containerPort: 8080
      imagePullSecrets:
      - name: ocirsecret


D:\practices\kubernetes\svc-comm>kubectl apply -f demo-date-app-lb.yml
service "demo-date-app-service" created
deployment.apps "demo-date-app-deployment" created


ui-app-lb.yml

apiVersion: v1
kind: Service
metadata:
  name: ui-app
spec:
  type: LoadBalancer
  ports:
  - port: 80
    protocol: TCP
    targetPort: 8080
  selector:
    app: ui-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    service: ui-app
  name: ui-app
spec:
  selector:
    matchLabels:
      app: ui-app
  replicas: 1
  template:
    metadata:
      labels:
        app: ui-app
    spec:
      containers:
      - name: ui-app  
        image: fra.ocir.io/demotenancy/docker-registry/demo-ui:1.0
        ports:
        - containerPort: 8080
        volumeMounts:
        - name: my-config
          mountPath: /usr/local/tomcat/conf/Catalina/localhost     
      imagePullSecrets:
      - name: ocirsecret
      volumes:
      - name: my-config
        configMap:
         name: ui-config


D:\practices\kubernetes\svc-comm>kubectl apply -f ui-app-lb.yml
service "ui-app-service" created
deployment.apps "ui-app-deployment" created


Cluster Details

D:\practices\kubernetes\svc-comm>kubectl get pods
NAME                                       READY     STATUS    RESTARTS   AGE
demo-date-app-deployment-94877df6f-gs72q   1/1       Running   0          23s
ui-app-deployment-c94f8d95-2ffbw           1/1       Running   0          11s


D:\practices\kubernetes\svc-comm>kubectl get services
NAME         TYPE           CLUSTER-IP     EXTERNAL-IP      PORT(S)        AGE
demo-date    LoadBalancer   10.96.43.136   132.145.240.19   80:31283/TCP   2h
kubernetes   ClusterIP      10.96.0.1      <none>           443/TCP        2h
ui-app       LoadBalancer   10.96.120.3    132.145.244.52   80:31811/TCP   1h
 
D:\practices\kubernetes\svc-comm>


ui-app pod environment details; note the service discovery environment variables injected by Kubernetes (DEMO_DATE_SERVICE_HOST, UI_APP_SERVICE_HOST, and so on):

D:\workspaces\kubernetes\docker-compose\ui\src\main\webapp\META-INF>kubectl exec ui-app-c94f8d95-mqklf -- printenv
PATH=/usr/local/tomcat/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/lib/jvm/java-1.8-openjdk/jre/bin:/usr/lib/jvm/java-1.8-openjdk/bin
HOSTNAME=ui-app-c94f8d95-mqklf
UI_APP_SERVICE_HOST=10.96.120.3
UI_APP_SERVICE_PORT=80
UI_APP_PORT_80_TCP=tcp://10.96.120.3:80
UI_APP_PORT_80_TCP_ADDR=10.96.120.3
KUBERNETES_SERVICE_PORT=443
KUBERNETES_PORT_443_TCP_PROTO=tcp
DEMO_DATE_SERVICE_HOST=10.96.43.136
DEMO_DATE_PORT_80_TCP_PROTO=tcp
UI_APP_PORT_80_TCP_PROTO=tcp
UI_APP_PORT_80_TCP_PORT=80
KUBERNETES_SERVICE_HOST=10.96.0.1
KUBERNETES_SERVICE_PORT_HTTPS=443
KUBERNETES_PORT=tcp://10.96.0.1:443
KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1
DEMO_DATE_PORT=tcp://10.96.43.136:80
DEMO_DATE_PORT_80_TCP=tcp://10.96.43.136:80
DEMO_DATE_PORT_80_TCP_PORT=80
DEMO_DATE_PORT_80_TCP_ADDR=10.96.43.136
KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443
KUBERNETES_PORT_443_TCP_PORT=443
DEMO_DATE_SERVICE_PORT=80
UI_APP_PORT=tcp://10.96.120.3:80
LANG=C.UTF-8
JAVA_HOME=/usr/lib/jvm/java-1.8-openjdk/jre
JAVA_VERSION=8u212
JAVA_ALPINE_VERSION=8.212.04-r0
CATALINA_HOME=/usr/local/tomcat
TOMCAT_NATIVE_LIBDIR=/usr/local/tomcat/native-jni-lib
LD_LIBRARY_PATH=/usr/local/tomcat/native-jni-lib
GPG_KEYS=05AB33110949707C93A279E3D3EFE6B686867BA6 07E48665A34DCAFAE522E5E6266191C37C037D42 47309207D818FFD8DCD3F83F1931D684307A10A5 541FBE7D8F78B25E055DDEE13C370389288584E7 61B832AC2F1C5A90F0F9B00A1C506407564C17A3 79F7026C690BAA50B92CD8B66A3AD3F4F22C4FED 9BA44C2621385CB966EBA586F72C284D731FABEE A27677289986DB50844682F8ACB77FC2E86E29AC A9C5DF4D22E99998D9875A5110C01C5A2F6059E7 DCFD35E0BF8CA7344752DE8B6FB21E8933C60243 F3A04C595DB5B6A5F1ECA43E3B7BBB100D811BBE F7DA48BB64BCB84ECBA7EE6935CD23C10D498E23
TOMCAT_MAJOR=9
TOMCAT_VERSION=9.0.20
TOMCAT_SHA512=6d2df51f0bfc6a90cfca61c86473b8843da4162c430ab06b8f66f364931f3d8a3ad399703acdd600ff4f633d7d6725edf05d5d5d19534716a2f3f9f5238a32a0
TOMCAT_TGZ_URLS=https://www.apache.org/dyn/closer.cgi?action=download&filename=tomcat/tomcat-9/v9.0.20/bin/apache-tomcat-9.0.20.tar.gz  https://www-us.apache.org/dist/tomcat/tomcat-9/v9.0.20/bin/apache-tomcat-9.0.20.tar.gz     https://www.apache.org/dist/tomcat/tomcat-9/v9.0.20/bin/apache-tomcat-9.0.20.tar.gz        https://archive.apache.org/dist/tomcat/tomcat-9/v9.0.20/bin/apache-tomcat-9.0.20.tar.gz
TOMCAT_ASC_URLS=https://www.apache.org/dyn/closer.cgi?action=download&filename=tomcat/tomcat-9/v9.0.20/bin/apache-tomcat-9.0.20.tar.gz.asc         https://www-us.apache.org/dist/tomcat/tomcat-9/v9.0.20/bin/apache-tomcat-9.0.20.tar.gz.asc      https://www.apache.org/dist/tomcat/tomcat-9/v9.0.20/bin/apache-tomcat-9.0.20.tar.gz.asc    https://archive.apache.org/dist/tomcat/tomcat-9/v9.0.20/bin/apache-tomcat-9.0.20.tar.gz.asc
HOME=/root


Let's connect to the ui-app pod.

D:\practices\kubernetes\svc-comm>kubectl exec -it ui-app-deployment-c94f8d95-2ffbw bash


The ui.xml context file is copied properly:

bash-4.4# cd conf/Catalina/localhost/
bash-4.4# ls
ui.xml


D:\workspaces\kubernetes\docker-compose\ui\src\main\webapp\META-INF>kubectl -it exec ui-app-c94f8d95-mqklf bash
bash-4.4# wget http://demo-date/date-service
Connecting to demo-date (10.96.43.136:80)
date-service         100% |*****************************************************************************************************************************************************|    34  0:00:00 ETA
bash-4.4# cat date-service
 
 
Mon Jul 01 10:09:44 GMT 2019
bash-4.4#


The date-service is up.

The ui app is up and able to communicate with the date-service.
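
Both applications can also be reached from outside the cluster through their load balancer external IPs; a minimal sketch, using the addresses from the service listing above:

curl http://132.145.240.19/date-service
curl http://132.145.244.52/ui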

References

Deploy Custom Docker Image To OKE Cluster

We will do the following

  • Push a custom Docker image to the OCI Registry
  • Deploy this custom Docker image to OKE
  • Scale it

Prerequisites

Create Kubernetes Secret

Format

kubectl create secret docker-registry ocirsecret --docker-server=<region-code>.ocir.io --docker-username='<tenancy-name>/<oci-username>' --docker-password='<oci-auth-token>' --docker-email='<email-address>'

Actual Command

$ kubectl create secret docker-registry ocirsecret --docker-server=fra.ocir.io --docker-username=demo/[email protected] --docker-password=A<AFDS}r2[754:{o> --docker-email=[email protected]
 
 
secret "ocirsecret" created

If your tenancy is federated with Oracle Identity Cloud Service, use the format {tenancy-name}/oracleidentitycloudservice/{username} for the username

$ kubectl create secret docker-registry ocirsecret --docker-server=fra.ocir.io --docker-username=demo/oracleidentitycloudservice/[email protected] --docker-password=A<AFDS}r2[754:{o> --docker-email=[email protected]
 
 
secret "ocirsecret" created

$ kubectl get secrets
NAME                  TYPE                                  DATA      AGE
default-token-g8xkv   kubernetes.io/service-account-token   3         20m
ocirsecret            kubernetes.io/dockerconfigjson        1         12s

Create Kubernetes Manifest File

hellodocker-lb.yml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: hellodocker-deployment
spec:
  selector:
    matchLabels:
      app: hellodocker
  replicas: 1
  template:
    metadata:
      labels:
        app: hellodocker
    spec:
      containers:
      - name: hellodocker  
        image: fra.ocir.io/demo/docker-registry/hellodocker:1.0
        ports:
        - containerPort: 80
      imagePullSecrets:
      - name: ocirsecret
---
apiVersion: v1
kind: Service
metadata:
  name: hellodocker-service
spec:
  type: LoadBalancer
  ports:
  - port: 80
    protocol: TCP
    targetPort: 80
  selector:
    app: hellodocker

image: {region-code}.ocir.io/{tenancy-name}/{repo-name}/{image-name}:{tag}

For imagePullSecrets, specify the secret created earlier, in this case ocirsecret.

Note that initially there is only one replica.

Deploy kubernetes application

$ kubectl create -f hellodocker-lb.yml
deployment.apps "hellodocker-deployment" created
service "hellodocker-service" created

Observe that there is only one deployment:

$ kubectl get deployments
NAME                     DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
hellodocker-deployment   1         1         1            1           29s

$ kubectl get pods -o wide
NAME                                      READY     STATUS    RESTARTS   AGE       IP           NODE
hellodocker-deployment-7c76c98f65-9pb6x   1/1       Running   0          38s       10.244.2.2   10.0.11.2

Keep a note of the external IP address.

$ kubectl get services
NAME                  TYPE           CLUSTER-IP    EXTERNAL-IP      PORT(S)        AGE
hellodocker-service   LoadBalancer   10.96.89.63   132.145.246.30   80:31296/TCP   57s
kubernetes            ClusterIP      10.96.0.1     <none>           443/TCP        28m

An OCI load balancer is automatically created.

Access the application using the external IP address.
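
A quick check from any machine with internet access; a minimal sketch, using the external IP from the service listing above:

curl http://132.145.246.30/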

$ kubectl describe service hellodocker-service
Name:                     hellodocker-service
Namespace:                default
Labels:                   <none>
Annotations:              <none>
Selector:                 app=hellodocker
Type:                     LoadBalancer
IP:                       10.96.89.63
LoadBalancer Ingress:     132.145.246.30
Port:                     <unset>  80/TCP
TargetPort:               80/TCP
NodePort:                 <unset>  31296/TCP
Endpoints:                10.244.2.2:80
Session Affinity:         None
External Traffic Policy:  Cluster
Events:
  Type    Reason                Age   From                Message
  ----    ------                ----  ----                -------
  Normal  EnsuringLoadBalancer  4m    service-controller  Ensuring load balancer
  Normal  EnsuredLoadBalancer   3m    service-controller  Ensured load balancer

Scale Deployment

$ kubectl get deployments
NAME                     DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
hellodocker-deployment   1         1         1            1           4m

$ kubectl scale --replicas=4 deployment/hellodocker-deployment
deployment.extensions "hellodocker-deployment" scaled

scaled

$ kubectl get deployments
NAME                     DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
hellodocker-deployment   4         4         4            4           6m

$ kubectl describe deployment hellodocker-deployment
Name:                   hellodocker-deployment
Namespace:              default
CreationTimestamp:      Tue, 02 Apr 2019 00:20:25 +0530
Labels:                 <none>
Annotations:            deployment.kubernetes.io/revision=1
Selector:               app=hellodocker
Replicas:               4 desired | 4 updated | 4 total | 4 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=hellodocker
  Containers:
   hellodocker:
    Image:        fra.ocir.io/srepreprod1/docker-registry/hellodocker:1.0
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Progressing    True    NewReplicaSetAvailable
  Available      True    MinimumReplicasAvailable
OldReplicaSets:  <none>
NewReplicaSet:   hellodocker-deployment-7c76c98f65 (4/4 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  7m    deployment-controller  Scaled up replica set hellodocker-deployment-7c76c98f65 to 1
  Normal  ScalingReplicaSet  47s   deployment-controller  Scaled up replica set hellodocker-deployment-7c76c98f65 to 4

Note that the hostname shown by the application changes between requests, now that multiple replicas serve the traffic.

Clean up

$ kubectl delete deployment hellodocker-deployment
deployment.extensions "hellodocker-deployment" deleted

Deleting the service below also automatically deletes the OCI load balancer.

$ kubectl delete service hellodocker-service
service "hellodocker-service" deleted