Create a server, a Managed Kubernetes cluster and a cloud database cluster in a private subnetwork

This is an example of building an infrastructure that consists of:

  • a private network with the subnet 192.168.199.0/24;
  • a cloud server of arbitrary configuration with a bootable network disk and an additional network disk;
  • a fault-tolerant Managed Kubernetes cluster with nodes of arbitrary configuration;
  • a MySQL semi-sync cluster of arbitrary configuration.

We recommend creating the resources in the following order. Note that if you create all the resources described in the configuration files at once, Terraform creates them regardless of the order in which they are listed in the file.


  1. Optional: configure the providers.

  2. Create an SSH key pair.

  3. Create a private network and subnet.

  4. Create a cloud router connected to an external network.

  5. Create a cloud server.

  6. Create a Managed Kubernetes cluster.

  7. Create a cloud database cluster.

Configuration files

Example file for configuring providers
terraform {
required_providers {
servercore = {
source = "terraform.servercore.com/servercore/servercore"
version = "~>6.0.0"
}
openstack = {
source = "terraform-provider-openstack/openstack"
version = "2.1.0"
}
}
}

provider "servercore" {
domain_name = "123456"
username = "user"
password = "password"
auth_region = "ru-9"
auth_url = "https://cloud.api.selcloud.ru/identity/v3/"
}

resource "servercore_project_v2" "project_1" {
name = "project"
}

resource "servercore_iam_serviceuser_v1" "serviceuser_1" {
name = "username"
password = "password"
role {
role_name = "member"
scope = "project"
project_id = servercore_project_v2.project_1.id
}
}

provider "openstack" {
auth_url = "https://cloud.api.selcloud.ru/identity/v3"
domain_name = "123456"
tenant_id = servercore_project_v2.project_1.id
user_name = servercore_iam_serviceuser_v1.serviceuser_1.name
password = servercore_iam_serviceuser_v1.serviceuser_1.password
region = "ru-9"
}
Example file for creating an arbitrary configuration server with a bootable network drive and an additional network drive
resource "servercore_keypair_v2" "keypair_1" {
name = "keypair"
public_key = file("~/.ssh/id_rsa.pub")
user_id = servercore_iam_serviceuser_v1.serviceuser_1.id
}

resource "openstack_compute_flavor_v2" "flavor_1" {
name = "custom-flavor-with-network-volume"
vcpus = 2
ram = 2048
disk = 0
is_public = false

lifecycle {
create_before_destroy = true
}

}

resource "openstack_networking_network_v2" "network_1" {
name = "private-network"
admin_state_up = "true"
}

resource "openstack_networking_subnet_v2" "subnet_1" {
network_id = openstack_networking_network_v2.network_1.id
cidr = "192.168.199.0/24"
}

data "openstack_networking_network_v2" "external_network_1" {
external = true
}

resource "openstack_networking_router_v2" "router_1" {
name = "router"
external_network_id = data.openstack_networking_network_v2.external_network_1.id
}

resource "openstack_networking_router_interface_v2" "router_interface_1" {
router_id = openstack_networking_router_v2.router_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
}

resource "openstack_networking_port_v2" "port_1" {
name = "port"
network_id = openstack_networking_network_v2.network_1.id

fixed_ip {
subnet_id = openstack_networking_subnet_v2.subnet_1.id
}
}

data "openstack_images_image_v2" "image_1" {
name = "Ubuntu 20.04 LTS 64-bit"
most_recent = true
visibility = "public"
}

resource "openstack_blockstorage_volume_v3" "volume_1" {
name = "boot-volume-for-server"
size = "5"
image_id = data.openstack_images_image_v2.image_1.id
volume_type = "fast.ru-9a"
availability_zone = "ru-9a"
enable_online_resize = true

lifecycle {
ignore_changes = [image_id]
}

}

resource "openstack_blockstorage_volume_v3" "volume_2" {
name = "additional-volume-for-server"
size = "7"
volume_type = "universal.ru-9a"
availability_zone = "ru-9a"
enable_online_resize = true
}

resource "openstack_compute_instance_v2" "server_1" {
name = "server"
flavor_id = openstack_compute_flavor_v2.flavor_1.id
key_pair = servercore_keypair_v2.keypair_1.name
availability_zone = "ru-9a"

network {
port = openstack_networking_port_v2.port_1.id
}

lifecycle {
ignore_changes = [image_id]
}

block_device {
uuid = openstack_blockstorage_volume_v3.volume_1.id
source_type = "volume"
destination_type = "volume"
boot_index = 0
}

block_device {
uuid = openstack_blockstorage_volume_v3.volume_2.id
source_type = "volume"
destination_type = "volume"
boot_index = -1
}

vendor_options {
ignore_resize_confirmation = true
}
}

resource "openstack_networking_floatingip_v2" "floatingip_1" {
pool = "external-network"
}

resource "openstack_networking_floatingip_associate_v2" "association_1" {
port_id = openstack_networking_port_v2.port_1.id
floating_ip = openstack_networking_floatingip_v2.floatingip_1.address
}

output "public_ip_address" {
value = openstack_networking_floatingip_v2.floatingip_1.address
}
Example file for creating a fault-tolerant Managed Kubernetes cluster with nodes of arbitrary configuration
resource "openstack_networking_network_v2" "network_1" {
name = "private-network"
admin_state_up = "true"
}

resource "openstack_networking_subnet_v2" "subnet_1" {
name = "private-subnet"
network_id = openstack_networking_network_v2.network_1.id
cidr = "192.168.199.0/24"
}

data "openstack_networking_network_v2" "external_network_1" {
external = true
}

resource "openstack_networking_router_v2" "router_1" {
name = "router"
external_network_id = data.openstack_networking_network_v2.external_network_1.id
}

resource "openstack_networking_router_interface_v2" "router_interface_1" {
router_id = openstack_networking_router_v2.router_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
}

data "servercore_mks_kube_versions_v1" "versions" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
}

resource "servercore_mks_cluster_v1" "cluster_1" {
name = "high_availability_cluster"
project_id = servercore_project_v2.project_1.id
region = "ru-9"
kube_version = data.servercore_mks_kube_versions_v1.versions.latest_version
network_id = openstack_networking_network_v2.network_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
maintenance_window_start = "00:00:00"
}

resource "servercore_mks_nodegroup_v1" "nodegroup_1" {
cluster_id = servercore_mks_cluster_v1.cluster_1.id
project_id = servercore_mks_cluster_v1.cluster_1.project_id
region = servercore_mks_cluster_v1.cluster_1.region
availability_zone = "ru-9a"
nodes_count = "2"
cpus = 2
ram_mb = 4096
volume_gb = 32
volume_type = "fast.ru-9a"
install_nvidia_device_plugin = false
labels = {
"label-key0": "label-value0",
"label-key1": "label-value1",
"label-key2": "label-value2",
}
}
Example file for creating a MySQL semi-sync cluster of arbitrary configuration
resource "openstack_networking_network_v2" "network_1" {
name = "private-network"
admin_state_up = "true"
}

resource "openstack_networking_subnet_v2" "subnet_1" {
network_id = openstack_networking_network_v2.network_1.id
cidr = "192.168.199.0/24"
}

data "servercore_dbaas_datastore_type_v1" "datastore_type_1" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
filter {
engine = "mysql_native"
version = "8"
}
}

resource "servercore_dbaas_mysql_datastore_v1" "datastore_1" {
name = "datastore-1"
project_id = servercore_project_v2.project_1.id
region = "ru-9"
type_id = data.servercore_dbaas_datastore_type_v1.datastore_type_1.datastore_types[0].id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
node_count = 3
flavor {
vcpus = 1
ram = 4096
disk = 32
}
}

resource "servercore_dbaas_user_v1" "user_1" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
datastore_id = servercore_dbaas_mysql_datastore_v1.datastore_1.id
name = "user"
password = "secret"
}

resource "servercore_dbaas_mysql_database_v1" "database_1" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
datastore_id = servercore_dbaas_mysql_datastore_v1.datastore_1.id
name = "database_1"
}

resource "servercore_dbaas_grant_v1" "grant_1" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
datastore_id = servercore_dbaas_mysql_datastore_v1.datastore_1.id
database_id = servercore_dbaas_mysql_database_v1.database_1.id
user_id = servercore_dbaas_user_v1.user_1.id
}

1. Optional: configure the providers

If you have already configured the Servercore and OpenStack providers, skip this step.

  1. Make sure that in the control panel you created a service user with the Account Administrator and User Administrator roles.

  2. Create a directory to store the configuration files and a separate file with the .tf extension for configuring the providers.

  3. Add the Servercore and OpenStack providers to the provider configuration file:

    terraform {
    required_providers {
    servercore = {
    source = "terraform.servercore.com/servercore/servercore"
    version = "6.0.0"
    }
    openstack = {
    source = "terraform-provider-openstack/openstack"
    version = "2.1.0"
    }
    }
    }

    Here version is the provider version. The current version of the OpenStack provider can be found in the Terraform Registry and on GitHub.

    Read more about the products and services that can be managed with these providers in the Servercore and OpenStack providers instructions.

  4. Initialize the Servercore provider:

    provider "servercore" {
    domain_name = "123456"
    username = "user"
    password = "password"
    auth_region = "ru-9"
    auth_url = "https://cloud.api.selcloud.ru/identity/v3/"
    }

    Here:

    • domain_name — Servercore account number. You can find it in the control panel, in the upper right-hand corner;
    • username — name of the service user with the Account Administrator and User Administrator roles. You can find it in the control panel: Identity & Access Management → User management → the Service users tab (the section is available only to the Account Owner and User Administrator);
    • password — the service user's password. You can view it when creating the user or change it to a new one;
    • auth_region — pool, for example ru-9. All resources will be created in this pool. The list of available pools can be found in the Availability matrices instruction.
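    The credentials above are hardcoded for simplicity. As a minimal sketch, the password can instead be supplied through a sensitive Terraform variable (the variable name servercore_password is illustrative) and passed in via terraform apply -var or a *.tfvars file:

    variable "servercore_password" {
      type      = string
      sensitive = true
    }

    provider "servercore" {
      domain_name = "123456"
      username    = "user"
      password    = var.servercore_password
      auth_region = "ru-9"
      auth_url    = "https://cloud.api.selcloud.ru/identity/v3/"
    }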
  5. Create a project:

    resource "servercore_project_v2" "project_1" {
    name = "project"
    }

    Check out the detailed description of the resource servercore_vpc_project_v2.

  6. Create a service user to access the project and assign the Project Administrator role to it:

    resource "servercore_iam_serviceuser_v1" "serviceuser_1" {
    name = "username"
    password = "password"
    role {
    role_name = "member"
    scope = "project"
    project_id = servercore_project_v2.project_1.id
    }
    }

    Here:

    • username — username;
    • password — user password. The password must be at least eight characters long and contain Latin letters in upper and lower case and digits;
    • project_id — project ID. You can find it in the control panel: in the Cloud platform section, open the project menu (the name of the current project); the ID is shown in the line of the desired project.

    Check out the detailed description of the resource servercore_iam_serviceuser_v1.

  7. Initialize the OpenStack provider:

    provider "openstack" {
    auth_url = "https://cloud.api.selcloud.ru/identity/v3"
    domain_name = "123456"
    tenant_id = servercore_project_v2.project_1.id
    user_name = servercore_iam_serviceuser_v1.serviceuser_1.name
    password = servercore_iam_serviceuser_v1.serviceuser_1.password
    region = "ru-9"
    }

    Here:

    • domain_name — Servercore account number. You can find it in the control panel, in the upper right-hand corner;
    • region — pool, for example ru-9. All resources will be created in this pool. The list of available pools can be found in the Availability matrices instruction.
  8. If you are configuring the providers and creating resources at the same time, add the depends_on argument to the OpenStack resources. For example, for the openstack_networking_network_v2 resource:

    resource "openstack_networking_network_v2" "network_1" {
    name = "private-network"
    admin_state_up = "true"

    depends_on = [
    servercore_project_v2.project_1,
    servercore_iam_serviceuser_v1.serviceuser_1
    ]
    }
  9. Open the CLI.

  10. Initialize the Terraform configuration in the directory:

    terraform init
  11. Check that the configuration files contain no errors:

    terraform validate
  12. Format the configuration files:

    terraform fmt
  13. Check the resources that will be created:

    terraform plan
  14. Apply the changes and create the resources:

    terraform apply
  15. Confirm creation — enter yes and press Enter. The created resources are displayed in the control panel.

  16. If there were insufficient quotas to create resources, increase quotas.

2. Create an SSH key pair

resource "servercore_keypair_v2" "keypair_1" {
name = "keypair"
public_key = file("~/.ssh/id_rsa.pub")
user_id = servercore_iam_serviceuser_v1.serviceuser_1.id
}

Here public_key is the path to the public SSH key. If you have not created SSH keys yet, generate them.

Check out the detailed description of the resource servercore_vpc_keypair_v2.
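If you prefer to manage the SSH key with Terraform as well, the key pair can be generated in the configuration. This is a minimal sketch, assuming the hashicorp/tls provider is added to required_providers; note that the generated private key ends up in the Terraform state, so the state file must be protected:

resource "tls_private_key" "ssh_key_1" {
  algorithm = "ED25519"
}

resource "servercore_keypair_v2" "keypair_1" {
  name       = "keypair"
  public_key = tls_private_key.ssh_key_1.public_key_openssh
  user_id    = servercore_iam_serviceuser_v1.serviceuser_1.id
}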

3. Create a private network and subnet

resource "openstack_networking_network_v2" "network_1" {
name = "private-network"
admin_state_up = "true"
}

resource "openstack_networking_subnet_v2" "subnet_1" {
name = "private-subnet"
network_id = openstack_networking_network_v2.network_1.id
cidr = "192.168.199.0/24"
}

Here cidr is the CIDR of the private subnet, for example 192.168.199.0/24.

See the detailed description of the resources openstack_networking_network_v2 and openstack_networking_subnet_v2.
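If hosts in the private subnet should use specific DNS servers, the subnet resource also accepts the dns_nameservers argument. A sketch with placeholder resolver addresses (substitute your own):

resource "openstack_networking_subnet_v2" "subnet_1" {
  name            = "private-subnet"
  network_id      = openstack_networking_network_v2.network_1.id
  cidr            = "192.168.199.0/24"
  dns_nameservers = ["1.1.1.1", "8.8.8.8"] # placeholder resolvers
}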

4. Create a cloud router connected to an external network

A cloud router connected to an external network acts as a 1:1 NAT for access from a private network to the Internet through the public IP address of the router.

data "openstack_networking_network_v2" "external_network_1" {
external = true
}

resource "openstack_networking_router_v2" "router_1" {
name = "router"
external_network_id = data.openstack_networking_network_v2.external_network_1.id
}

resource "openstack_networking_router_interface_v2" "router_interface_1" {
router_id = openstack_networking_router_v2.router_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
}

See the detailed description of the data source openstack_networking_network_v2 and the resources openstack_networking_router_v2 and openstack_networking_router_interface_v2.

5. Create a cloud server

  1. Create a port for the cloud server.

  2. Get an image.

  3. Create a bootable disk.

  4. Create a server.

  5. Create a public IP address.

  6. Associate the public IP address with the private IP address of the cloud server.

1. Create a port for the cloud server

resource "openstack_networking_port_v2" "port_1" {
name = "port"
network_id = openstack_networking_network_v2.network_1.id

fixed_ip {
subnet_id = openstack_networking_subnet_v2.subnet_1.id
}
}

Check out the detailed description of the resource openstack_networking_port_v2.

2. Get an image

data "openstack_images_image_v2" "image_1" {
name = "Ubuntu 20.04 LTS 64-bit"
most_recent = true
visibility = "public"
}

Check out the detailed description of the data source openstack_images_image_v2.

3. Create a bootable network disk

resource "openstack_blockstorage_volume_v3" "volume_1" {
name = "boot-volume-for-server"
size = "5"
image_id = data.openstack_images_image_v2.image_1.id
volume_type = "fast.ru-9a"
availability_zone = "ru-9a"
enable_online_resize = true

lifecycle {
ignore_changes = [image_id]
}

}

Here:

  • size — disk size in GB;
  • image_id — ID of the image from which the bootable disk is created;
  • volume_type — disk type in the format <type>.<pool_segment>, for example fast.ru-9a;
  • availability_zone — pool segment where the disk will be created, e.g. ru-9a.

Check out the detailed description of the resource openstack_blockstorage_volume_v3.

4. Create a cloud server

resource "openstack_compute_instance_v2" "server_1" {
name = "server"
flavor_id = "4011"
key_pair = servercore_keypair_v2.keypair_1.name
availability_zone = "ru-9a"

network {
port = openstack_networking_port_v2.port_1.id
}

lifecycle {
ignore_changes = [image_id]
}

block_device {
uuid = openstack_blockstorage_volume_v3.volume_1.id
source_type = "volume"
destination_type = "volume"
boot_index = 0
}

vendor_options {
ignore_resize_confirmation = true
}
}

Here:

  • availability_zone — pool segment where the cloud server will be created, e.g. ru-9a. The list of available pool segments can be found in the instructions Availability matrix;
  • flavor_id — flavor ID. Flavors correspond to cloud server configurations and determine the number of vCPUs, the amount of RAM and, optionally, the local disk size of the server. You can use fixed-configuration flavors. For example, 4011 is the ID of a Memory Line fixed-configuration flavor with 2 vCPUs and 16 GB RAM in the ru-9 pool. The list of flavors can be viewed in the table List of fixed-configuration flavors in all pools.
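Instead of hardcoding the flavor ID, a flavor can also be looked up by its parameters through the openstack_compute_flavor_v2 data source. A sketch, assuming exactly one flavor in the pool matches the requested vCPU and RAM values (adjust them to the configuration you need):

data "openstack_compute_flavor_v2" "flavor_1" {
  vcpus = 2
  ram   = 16384 # RAM in MB
}

The server resource would then reference it as flavor_id = data.openstack_compute_flavor_v2.flavor_1.id.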

Check out the detailed description of the resource openstack_compute_instance_v2.

5. Create a public IP address

resource "openstack_networking_floatingip_v2" "floatingip_1" {
pool = "external-network"
}

Check out the detailed description of the resource openstack_networking_floatingip_v2.

6. Associate the public IP address with the private IP address of the cloud server

The public IP address will be connected to the cloud server port and associated with the private IP.

resource "openstack_networking_floatingip_associate_v2" "association_1" {
port_id = openstack_networking_port_v2.port_1.id
floating_ip = openstack_networking_floatingip_v2.floatingip_1.address
}

Check out the detailed description of the resource openstack_networking_floatingip_associate_v2.
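To print the assigned public address after terraform apply, the example configuration file above also defines an output:

output "public_ip_address" {
  value = openstack_networking_floatingip_v2.floatingip_1.address
}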

6. Create a Managed Kubernetes cluster

  1. Create a fault-tolerant cluster.

  2. Create a node group of arbitrary configuration with a network drive.

1. Create a fault-tolerant cluster

data "servercore_mks_kube_versions_v1" "versions" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
}

resource "servercore_mks_cluster_v1" "cluster_1" {
name = "high_availability_cluster"
project_id = servercore_project_v2.project_1.id
region = "ru-9"
kube_version = data.servercore_mks_kube_versions_v1.versions.latest_version
network_id = openstack_networking_network_v2.network_1.id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
maintenance_window_start = "00:00:00"
}

Here region is the pool in which the cluster will be created, for example ru-9.

Check out the detailed description of the resource servercore_mks_cluster_v1.

2. Create a group of nodes of arbitrary configuration with a network disk

  resource "servercore_mks_nodegroup_v1" "nodegroup_1" {
cluster_id = servercore_mks_cluster_v1.cluster_1.id
project_id = servercore_mks_cluster_v1.cluster_1.project_id
region = servercore_mks_cluster_v1.cluster_1.region
availability_zone = "ru-9a"
nodes_count = "2"
cpus = 2
ram_mb = 4096
volume_gb = 32
volume_type = "fast.ru-9a"
install_nvidia_device_plugin = false
labels = {
"label-key0": "label-value0",
"label-key1": "label-value1",
"label-key2": "label-value2",
}
}

Here:

  • availability_zone — pool segment where the group of nodes will be located, e.g. ru-9a;

  • nodes_count — number of working nodes in the node group. The maximum number of nodes is 15;

  • cpus — number of vCPUs for each node;

  • ram_mb — the amount of RAM for each node in MB;

  • volume_gb — disk size in GB;

  • volume_type — disk type in the format <type>.<pool_segment>, for example basic.ru-9a:

    • <type> — basic, universal or fast;
    • <pool_segment> — pool segment where the network drive will be created, e.g. ru-9a;
  • install_nvidia_device_plugin — confirms or cancels the installation of GPU drivers and the NVIDIA® Device Plugin:

    • true — for GPU flavors, confirms the installation of GPU drivers and the NVIDIA® Device Plugin;
    • false — for both GPU and non-GPU flavors, GPU drivers and the NVIDIA® Device Plugin are not installed. You can install drivers for GPU node groups yourself.

Check out the detailed description of the resource servercore_mks_nodegroup_v1.
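To spread worker nodes across pool segments, you can add a second node group in another availability zone. A sketch based on the node group above; the zone ru-9b is an assumption, check the availability matrix for your pool:

resource "servercore_mks_nodegroup_v1" "nodegroup_2" {
  cluster_id        = servercore_mks_cluster_v1.cluster_1.id
  project_id        = servercore_mks_cluster_v1.cluster_1.project_id
  region            = servercore_mks_cluster_v1.cluster_1.region
  availability_zone = "ru-9b" # assumed zone, see the availability matrix
  nodes_count       = "2"
  cpus              = 2
  ram_mb            = 4096
  volume_gb         = 32
  volume_type       = "fast.ru-9b" # the disk type segment must match the zone
  install_nvidia_device_plugin = false
}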

7. Create a cloud database cluster

  1. Create a MySQL semi-sync cluster.

  2. Create a user.

  3. Create a database.

  4. Grant the user access to the database.

1. Create a MySQL semi-sync cluster of arbitrary configuration

  data "servercore_dbaas_datastore_type_v1" "datastore_type_1" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
filter {
engine = "mysql_native"
version = "8"
}
}

resource "servercore_dbaas_mysql_datastore_v1" "datastore_1" {
name = "datastore-1"
project_id = servercore_project_v2.project_1.id
region = "ru-9"
type_id = data.servercore_dbaas_datastore_type_v1.datastore_type_1.datastore_types[0].id
subnet_id = openstack_networking_subnet_v2.subnet_1.id
node_count = 3
flavor {
vcpus = 1
ram = 4096
disk = 32
}
}

Here:

  • region — pool, for example ru-9. The list of available pools can be found in the instructions Availability matrices;
  • filter — filter for the cloud database type:
    • engine — type of cloud databases. For a MySQL semi-sync cluster, specify mysql_native;
    • version — cloud database version. The list of available versions can be found in the instructions Versions and configurations;
  • node_count — number of nodes. The maximum number of nodes is 6;
  • flavor — arbitrary cluster configuration. The available values of arbitrary configurations can be found in the instructions Versions and configurations:
    • vcpus — number of vCPUs;
    • ram — the amount of RAM in MB;
    • disk — disk size in GB.

View the detailed description of the data source servercore_dbaas_datastore_type_v1 and the resource servercore_dbaas_mysql_datastore_v1.

2. Create a user

resource "servercore_dbaas_user_v1" "user_1" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
datastore_id = servercore_dbaas_mysql_datastore_v1.datastore_1.id
name = "user"
password = "secret"
}

Here:

  • region — pool in which the cluster resides;
  • name — username;
  • password — user password.

Check out the detailed description of the resource servercore_dbaas_user_v1.

3. Create a database

resource "servercore_dbaas_mysql_database_v1" "database_1" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
datastore_id = servercore_dbaas_mysql_datastore_v1.datastore_1.id
name = "database_1"
}

Here region is the pool in which the cluster resides.

Check out the detailed description of the resource servercore_dbaas_mysql_database_v1.

4. Grant the user access to the database

resource "servercore_dbaas_grant_v1" "grant_1" {
project_id = servercore_project_v2.project_1.id
region = "ru-9"
datastore_id = servercore_dbaas_mysql_datastore_v1.datastore_1.id
database_id = servercore_dbaas_mysql_database_v1.database_1.id
user_id = servercore_dbaas_user_v1.user_1.id
}

Check out the detailed description of the resource servercore_dbaas_grant_v1.