Create a basic Managed Kubernetes cluster

We recommend creating the resources in the order given below. If you create all resources at once, Terraform takes into account the dependencies between resources that are specified in the configuration files. If dependencies are not specified, resources are created in parallel, which can cause errors: for example, a resource that another resource requires may not exist yet.
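Terraform infers the order automatically when one resource references an attribute of another. A minimal sketch using resources from this instruction: the reference to network_1.id makes Terraform create the network before the subnet.

resource "openstack_networking_network_v2" "network_1" {
  name           = "private-network"
  admin_state_up = "true"
}

resource "openstack_networking_subnet_v2" "subnet_1" {
  # Implicit dependency: the subnet is created after the network.
  network_id = openstack_networking_network_v2.network_1.id
  cidr       = "192.168.199.0/24"
}

When there is no attribute reference, declare the order explicitly with the depends_on argument, as shown in step 8 of the provider configuration below.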


  1. Optional: configure the providers.
  2. Create a private network and subnet.
  3. Create a cloud router connected to an external network.
  4. Create a basic cluster.
  5. Create a node group with a network disk.

Configuration files

Example file for configuring providers
terraform {
  required_providers {
    servercore = {
      source  = "terraform.servercore.com/servercore/servercore"
      version = "~> 6.0"
    }
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "2.1.0"
    }
  }
}

provider "servercore" {
domain_name = "123456"
username = "user"
password = "password"
auth_region = "ru-9"
auth_url = "https://cloud.api.selcloud.ru/identity/v3/"
}

resource "servercore_project_v2" "project_1" {
name = "project"
}

resource "servercore_iam_serviceuser_v1" "serviceuser_1" {
name = "username"
password = "password"
role {
role_name = "member"
scope = "project"
project_id = servercore_project_v2.project_1.id
}
}

provider "openstack" {
auth_url = "https://cloud.api.selcloud.ru/identity/v3"
domain_name = "123456"
tenant_id = servercore_project_v2.project_1.id
user_name = servercore_iam_serviceuser_v1.serviceuser_1.name
password = servercore_iam_serviceuser_v1.serviceuser_1.password
region = "ru-9"
}
Example file for a basic cluster with nodes of arbitrary configuration
resource "openstack_networking_network_v2" "network_1" {
name = "private-network"
admin_state_up = "true"
}

resource "openstack_networking_subnet_v2" "subnet_1" {
network_id = openstack_networking_network_v2.network_1.id
cidr = "192.168.199.0/24"
}

data "openstack_networking_network_v2" "external_network_1" {
external = true
}

resource "openstack_networking_router_v2" "router_1" {
name = "router"
external_network_id = data.openstack_networking_network_v2.external_network_1.id
}

resource "openstack_networking_router_interface_v2" "router_interface_1" {
router_id = openstack_networking_router_v2.router_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
}

data "servercore_mks_kube_versions_v1" "versions" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
}

resource "servercore_mks_cluster_v1" "cluster_1" {
name = "basic_cluster"
project_id = servercore_project_v2.project_1.id
region = "ru-9"
kube_version = data.servercore_mks_kube_versions_v1.versions.latest_version
zonal = true
enable_patch_version_auto_upgrade = false
network_id = openstack_networking_network_v2.network_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
maintenance_window_start = "00:00:00"
}

resource "servercore_mks_nodegroup_v1" "nodegroup_1" {
cluster_id = servercore_mks_cluster_v1.cluster_1.id
project_id = servercore_mks_cluster_v1.cluster_1.project_id
region = servercore_mks_cluster_v1.cluster_1.region
availability_zone = "ru-9a"
nodes_count = "2"
cpus = 2
ram_mb = 4096
volume_gb = 32
volume_type = "fast.ru-9a"
install_nvidia_device_plugin = false
labels = {
"label-key0": "label-value0",
"label-key1": "label-value1",
"label-key2": "label-value2",
}
}
Example file for a basic cluster with fixed-configuration nodes (flavors)
resource "openstack_networking_network_v2" "network_1" {
name = "private-network"
admin_state_up = "true"
}

resource "openstack_networking_subnet_v2" "subnet_1" {
network_id = openstack_networking_network_v2.network_1.id
cidr = "192.168.199.0/24"
}

data "openstack_networking_network_v2" "external_network_1" {
external = true
}

resource "openstack_networking_router_v2" "router_1" {
name = "router"
external_network_id = data.openstack_networking_network_v2.external_network_1.id
}

resource "openstack_networking_router_interface_v2" "router_interface_1" {
router_id = openstack_networking_router_v2.router_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
}

data "servercore_mks_kube_versions_v1" "versions" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
}

resource "servercore_mks_cluster_v1" "cluster_1" {
name = "basic_cluster"
project_id = servercore_project_v2.project_1.id
region = "ru-9"
kube_version = data.servercore_mks_kube_versions_v1.versions.latest_version
zonal = true
enable_patch_version_auto_upgrade = false
network_id = openstack_networking_network_v2.network_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
maintenance_window_start = "00:00:00"
}

resource "servercore_mks_nodegroup_v1" "nodegroup_1" {
cluster_id = servercore_mks_cluster_v1.cluster_1.id
project_id = servercore_mks_cluster_v1.cluster_1.project_id
region = servercore_mks_cluster_v1.cluster_1.region
availability_zone = "ru-9a"
nodes_count = "2"
flavor_id = "1011"
volume_gb = 32
volume_type = "fast.ru-9a"
install_nvidia_device_plugin = false
labels = {
"label-key0": "label-value0",
"label-key1": "label-value1",
"label-key2": "label-value2",
}
}

1. Optional: configure providers

If you have already configured the Servercore and OpenStack providers, skip this step.

  1. Make sure that you have created a service user with the Account Administrator and User Administrator roles in the control panel.

  2. Create a directory to store the configuration files and a separate file with a .tf extension to configure the providers.

  3. Add the Servercore and OpenStack providers to the provider configuration file:

    terraform {
      required_providers {
        servercore = {
          source  = "terraform.servercore.com/servercore/servercore"
          version = "~> 6.0"
        }
        openstack = {
          source  = "terraform-provider-openstack/openstack"
          version = "2.1.0"
        }
      }
    }

    Here version is the provider version. The current version of the OpenStack provider can be found in the Terraform Registry and on GitHub.

    Learn more about the products and services that can be managed with the providers in the Servercore and OpenStack providers instruction.

  4. Initialize the Servercore provider:

    provider "servercore" {
    domain_name = "123456"
    username = "user"
    password = "password"
    auth_region = "ru-9"
    auth_url = "https://cloud.api.selcloud.ru/identity/v3/"
    }

    Here:

    • domain_name — Servercore account number. You can find it in the control panel in the upper right corner;
    • username — name of the service user with the Account Administrator and User Administrator roles. You can view it in the control panel under Access Control → User Management → the Service Users tab (the section is available only to the Account Owner and the User Administrator);
    • password — password of the service user. You can view it when creating the user or change it to a new one;
    • auth_region — pool, for example ru-9. All resources will be created in this pool. The list of available pools is in the Availability matrices instruction.
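    If you prefer not to store credentials in the configuration file, you can pass them through Terraform input variables marked as sensitive. A minimal sketch; the variable names sc_username and sc_password are illustrative:

    variable "sc_username" {
      type = string
    }

    variable "sc_password" {
      type      = string
      sensitive = true
    }

    provider "servercore" {
      domain_name = "123456"
      username    = var.sc_username # illustrative variable instead of a hardcoded value
      password    = var.sc_password
      auth_region = "ru-9"
      auth_url    = "https://cloud.api.selcloud.ru/identity/v3/"
    }

    Terraform will prompt for the values during plan and apply, or you can set them with the TF_VAR_sc_username and TF_VAR_sc_password environment variables.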
  5. Create a project:

    resource "servercore_project_v2" "project_1" {
    name = "project"
    }

    View a detailed description of the servercore_project_v2 resource.

  6. Create a service user to access the project and assign the Project Administrator role to it:

    resource "servercore_iam_serviceuser_v1" "serviceuser_1" {
    name = "username"
    password = "password"
    role {
    role_name = "member"
    scope = "project"
    project_id = servercore_project_v2.project_1.id
    }
    }

    Here:

    • name — service user name;
    • password — service user password. The password must be at least eight characters long and contain Latin letters of different cases and digits;
    • project_id — project ID. You can view it in the control panel: open the Cloud Platform section → open the projects menu (the name of the current project) → copy the ID from the line of the required project.

    View a detailed description of the servercore_iam_serviceuser_v1 resource.

  7. Initialize the OpenStack provider:

    provider "openstack" {
    auth_url = "https://cloud.api.selcloud.ru/identity/v3"
    domain_name = "123456"
    tenant_id = servercore_project_v2.project_1.id
    user_name = servercore_iam_serviceuser_v1.serviceuser_1.name
    password = servercore_iam_serviceuser_v1.serviceuser_1.password
    region = "ru-9"
    }

    Here:

    • domain_name — Servercore account number. You can find it in the control panel in the upper right corner;
    • region — pool, for example ru-9. All resources will be created in this pool. The list of available pools is in the Availability matrices instruction.
  8. If you create resources at the same time as you configure the providers, add the depends_on argument to the OpenStack resources. For example, for the openstack_networking_network_v2 resource:

    resource "openstack_networking_network_v2" "network_1" {
    name = "private-network"
    admin_state_up = "true"

    depends_on = [
    servercore_project_v2.project_1,
    servercore_iam_serviceuser_v1.serviceuser_1
    ]
    }
  9. Open the CLI.

  10. Initialize the Terraform configuration in the directory:

    terraform init
  11. Check that the configuration files contain no errors:

    terraform validate
  12. Format the configuration files:

    terraform fmt
  13. Check the resources that will be created:

    terraform plan
  14. Apply the changes and create the resources:

    terraform apply
  15. To confirm the creation, type yes and press Enter. The created resources will appear in the control panel.

  16. If there were not enough quotas to create resources, increase the quotas.

2. Create a private network and subnet

resource "openstack_networking_network_v2" "network_1" {
name = "private-network"
admin_state_up = "true"
}

resource "openstack_networking_subnet_v2" "subnet_1" {
name = "private-subnet"
network_id = openstack_networking_network_v2.network_1.id
cidr = "192.168.199.0/24"
dns_nameservers = ["188.93.16.19", "188.93.17.19"]
enable_dhcp = false
}

Here:

  • cidr — CIDR of the private subnet, for example 192.168.199.0/24;
  • dns_nameservers — DNS servers, for example the Servercore DNS servers 188.93.16.19 and 188.93.17.19.

See the detailed descriptions of the openstack_networking_network_v2 and openstack_networking_subnet_v2 resources.

3. Create a cloud router connected to an external network

A cloud router connected to an external network acts as a 1:1 NAT, giving resources in the private network access to the Internet through the public IP address of the router.

data "openstack_networking_network_v2" "external_network_1" {
external = true
}

resource "openstack_networking_router_v2" "router_1" {
name = "router"
external_network_id = data.openstack_networking_network_v2.external_network_1.id
}

resource "openstack_networking_router_interface_v2" "router_interface_1" {
router_id = openstack_networking_router_v2.router_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
}

See the detailed descriptions of the openstack_networking_router_v2 and openstack_networking_router_interface_v2 resources.

4. Create a basic cluster

data "servercore_mks_kube_versions_v1" "versions" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
}

resource "servercore_mks_cluster_v1" "cluster_1" {
name = "basic_cluster"
project_id = servercore_project_v2.project_1.id
region = "ru-9"
kube_version = data.servercore_mks_kube_versions_v1.versions.latest_version
zonal = true
enable_patch_version_auto_upgrade = false
network_id = openstack_networking_network_v2.network_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
maintenance_window_start = "00:00:00"
}

Here region is a pool, for example ru-9. The list of available pools is in the Availability matrices instruction.

View a detailed description of the servercore_mks_cluster_v1 resource.
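To check which Kubernetes version the cluster gets, you can expose the value from the servercore_mks_kube_versions_v1 data source as a Terraform output. A minimal sketch; the output name is illustrative:

output "latest_kube_version" {
  # The same attribute that kube_version of the cluster references.
  value = data.servercore_mks_kube_versions_v1.versions.latest_version
}

After terraform apply, the selected version is printed in the outputs.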

5. Create a node group with a network disk

resource "servercore_mks_nodegroup_v1" "nodegroup_1" {
cluster_id = servercore_mks_cluster_v1.cluster_1.id
project_id = servercore_mks_cluster_v1.cluster_1.project_id
region = servercore_mks_cluster_v1.cluster_1.region
availability_zone = "ru-9a"
nodes_count = "2"
flavor_id = "1011"
volume_gb = 32
volume_type = "fast.ru-9a"
install_nvidia_device_plugin = false
labels = {
"label-key0": "label-value0",
"label-key1": "label-value1",
"label-key2": "label-value2",
}
}

Here:

  • availability_zone — pool segment in which the node group will be located, for example ru-9a;

  • nodes_count — number of worker nodes in the node group. The maximum number of nodes is 15;

  • flavor_id — flavor ID. Flavors correspond to cloud server configurations and determine the number of vCPUs, the amount of RAM, and, optionally, the local disk size of a node. For example, 3031 is the flavor for a node with a GPU Line configuration with 4 vCPUs and 32 GB of RAM. You can list the flavors available in a specific pool in the OpenStack CLI, for example with the openstack flavor list command;

  • volume_gb — disk size in GB. If the disk size is already defined by the configuration you selected with flavor_id, the volume_gb argument does not need to be specified;

  • volume_type — disk type in the format <type>.<pool_segment>, for example basic.ru-9a:

    • <type> — basic, universal, or fast;
    • <pool_segment> — pool segment in which the network disk will be created, for example ru-9a;
  • install_nvidia_device_plugin — confirms or cancels the installation of the GPU drivers and the NVIDIA® Device Plugin (a sketch of a GPU node group follows this list):

    • true — for GPU flavors, confirms the installation of the GPU drivers and the NVIDIA® Device Plugin;
    • false — for both GPU and non-GPU flavors, cancels the installation of the GPU drivers and the NVIDIA® Device Plugin. For node groups with GPUs, you can install the drivers yourself.
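
For reference, a node group with a GPU flavor and driver installation enabled might look like the sketch below. It reuses only the arguments shown above; flavor ID 3031 is the GPU Line example from the flavor_id description, and the other values are illustrative.

resource "servercore_mks_nodegroup_v1" "gpu_nodegroup_1" {
  cluster_id        = servercore_mks_cluster_v1.cluster_1.id
  project_id        = servercore_mks_cluster_v1.cluster_1.project_id
  region            = servercore_mks_cluster_v1.cluster_1.region
  availability_zone = "ru-9a"
  nodes_count       = "2"

  # GPU Line flavor with 4 vCPUs and 32 GB of RAM (the example from the
  # flavor_id description above); assumed to be available in the pool.
  flavor_id = "3031"

  volume_gb   = 32
  volume_type = "fast.ru-9a"

  # For a GPU flavor, true confirms the installation of the GPU drivers
  # and the NVIDIA® Device Plugin.
  install_nvidia_device_plugin = true
}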

View a detailed description of the servercore_mks_nodegroup_v1 resource.