Running a Mongo 3.6 Replica Set in Docker Swarm

Instructions for running MongoDB 3.6 in Docker Swarm as part of a sharded cluster: a shard replica set, a config server replica set and a mongos router.

Setting up the Mongo cluster

Create an external network:

docker network create --subnet 172.22.0.1/16 --driver=overlay --attachable cloud_backend -o "com.docker.network.bridge.name"="cloud_backend"
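
If you want to double-check the network before going further, you can inspect its driver, scope and subnet (a quick sanity check, not required for the rest of the guide):

docker network inspect cloud_backend --format '{{.Driver}} {{.Scope}} {{(index .IPAM.Config 0).Subnet}}'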

Create a volume on all three servers:

docker volume create database_mongo_key
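
The stack is deployed later under the name database, so the mongo_key volume declared in the compose file resolves to this database_mongo_key volume. You can confirm that its mount point matches the path used in the next step:

docker volume inspect database_mongo_key --format '{{.Mountpoint}}'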

Create the key file on one of the hosts:

openssl rand -base64 700 > /var/lib/docker/volumes/database_mongo_key/_data/mongo.key
chmod 400 /var/lib/docker/volumes/database_mongo_key/_data/mongo.key
chown 999:999 /var/lib/docker/volumes/database_mongo_key/_data/mongo.key

Then copy it to the other hosts into the same directory, for example as sketched below.
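
A minimal sketch of the copy step, assuming root SSH access and hypothetical hostnames node2 and node3:

for host in node2 node3; do
  scp -p /var/lib/docker/volumes/database_mongo_key/_data/mongo.key root@$host:/var/lib/docker/volumes/database_mongo_key/_data/mongo.key
  ssh root@$host "chown 999:999 /var/lib/docker/volumes/database_mongo_key/_data/mongo.key && chmod 400 /var/lib/docker/volumes/database_mongo_key/_data/mongo.key"
done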

Create a mongo.yaml file:

version: "3.7"

services:

  mongo1_replica1:
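    # shard replica set member (replSet "replica1")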
    image: mongo:3.6.23
    hostname: "{{.Task.ID}}.{{.Service.Name}}.local"
    command: --shardsvr --replSet replica1 --keyFile /data/mongo/mongo.key --journal --port 27017 --bind_ip 0.0.0.0 --auth
    volumes:
      - "mongo_key:/data/mongo"
      - "mongo1_replica1_configdb:/data/configdb"
      - "mongo1_replica1_db:/data/db"
    deploy:
      replicas: 1
      endpoint_mode: dnsrr
      update_config:
        parallelism: 1
        failure_action: rollback
        delay: 5s
      restart_policy:
        condition: "on-failure"
        delay: 10s
        window: 120s
      placement:
        constraints:
          - node.labels.name == docker0
    networks:
      - mongo
    logging:
      driver: journald

  mongo2_replica1:
    image: mongo:3.6.23
    hostname: "{{.Task.ID}}.{{.Service.Name}}.local"
    command: --shardsvr --replSet replica1 --keyFile /data/mongo/mongo.key --journal --port 27017 --bind_ip 0.0.0.0 --auth
    volumes:
      - "mongo_key:/data/mongo"
      - "mongo2_replica1_configdb:/data/configdb"
      - "mongo2_replica1_db:/data/db"
    deploy:
      replicas: 1
      endpoint_mode: dnsrr
      update_config:
        parallelism: 1
        failure_action: rollback
        delay: 5s
      restart_policy:
        condition: "on-failure"
        delay: 10s
        window: 120s
      placement:
        constraints:
          - node.labels.name == docker0
    networks:
      - mongo
    logging:
      driver: journald

  mongo3_replica1:
    image: mongo:3.6.23
    hostname: "{{.Task.ID}}.{{.Service.Name}}.local"
    command: --shardsvr --replSet replica1 --keyFile /data/mongo/mongo.key --journal --port 27017 --bind_ip 0.0.0.0 --auth
    volumes:
      - "mongo_key:/data/mongo"
      - "mongo3_replica1_configdb:/data/configdb"
      - "mongo3_replica1_db:/data/db"
    deploy:
      replicas: 1
      endpoint_mode: dnsrr
      update_config:
        parallelism: 1
        failure_action: rollback
        delay: 5s
      restart_policy:
        condition: "on-failure"
        delay: 10s
        window: 120s
      placement:
        constraints:
          - node.labels.name == docker0
    networks:
      - mongo
    logging:
      driver: journald
  
  mongo1_config:
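    # config server replica set member (replSet "config1")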
    image: mongo:3.6.23
    hostname: "{{.Task.ID}}.{{.Service.Name}}.local"
    command: --configsvr --replSet config1 --keyFile /data/mongo/mongo.key --journal --port 27017 --bind_ip 0.0.0.0 --auth
    volumes:
      - "mongo_key:/data/mongo"
      - "mongo1_config_db:/data/configdb"      
      - "mongo1_config_data:/data/db"
    deploy:
      replicas: 1
      endpoint_mode: dnsrr
      update_config:
        parallelism: 1
        failure_action: rollback
        delay: 5s
      restart_policy:
        condition: "on-failure"
        delay: 10s
        window: 120s
      placement:
        constraints:
          - node.labels.name == docker0
    networks:
      - mongo
    logging:
      driver: journald

  mongo2_config:
    image: mongo:3.6.23
    hostname: "{{.Task.ID}}.{{.Service.Name}}.local"
    command: --configsvr --replSet config1 --keyFile /data/mongo/mongo.key --journal --port 27017 --bind_ip 0.0.0.0 --auth
    volumes:
      - "mongo_key:/data/mongo"
      - "mongo2_config_db:/data/configdb"      
      - "mongo2_config_data:/data/db"
    deploy:
      replicas: 1
      endpoint_mode: dnsrr
      update_config:
        parallelism: 1
        failure_action: rollback
        delay: 5s
      restart_policy:
        condition: "on-failure"
        delay: 10s
        window: 120s
      placement:
        constraints:
          - node.labels.name == docker0
    networks:
      - mongo
    logging:
      driver: journald

  mongo3_config:
    image: mongo:3.6.23
    hostname: "{{.Task.ID}}.{{.Service.Name}}.local"
    command: --configsvr --replSet config1 --keyFile /data/mongo/mongo.key --journal --port 27017 --bind_ip 0.0.0.0 --auth
    volumes:
      - "mongo_key:/data/mongo"
      - "mongo3_config_db:/data/configdb"      
      - "mongo3_config_data:/data/db"
    deploy:
      replicas: 1
      endpoint_mode: dnsrr
      update_config:
        parallelism: 1
        failure_action: rollback
        delay: 5s
      restart_policy:
        condition: "on-failure"
        delay: 10s
        window: 120s
      placement:
        constraints:
          - node.labels.name == docker0
    networks:
      - mongo
    logging:
      driver: journald  
  
  mongo1:
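    # mongos query router: the only service also attached to the external cloud_backend network; applications connect here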
    image: mongo:3.6.23
    hostname: "{{.Task.ID}}.{{.Service.Name}}.local"
    command: mongos --keyFile /data/mongo/mongo.key --configdb config1/mongo1_config:27017,mongo2_config:27017,mongo3_config:27017 --bind_ip 0.0.0.0 --port 27017
    volumes:
      - "mongo_key:/data/mongo"
    deploy:
      replicas: 1
      endpoint_mode: dnsrr
      update_config:
        parallelism: 1
        failure_action: rollback
        delay: 5s
      restart_policy:
        condition: "on-failure"
        delay: 10s
        window: 120s
      placement:
        constraints:
          - node.labels.name == docker0
    networks:
      - mongo
      - cloud_backend
    logging:
      driver: journald  
  
volumes:
  mongo_key:  
  mongo1_config_db:
  mongo1_config_data:
  mongo1_replica1_configdb:
  mongo1_replica1_db:
  mongo2_config_db:
  mongo2_config_data:
  mongo2_replica1_configdb:
  mongo2_replica1_db:
  mongo3_config_db:
  mongo3_config_data:
  mongo3_replica1_configdb:
  mongo3_replica1_db:

networks:
  mongo:
  cloud_backend:
    external: true
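
Note that every service above is pinned to the node labeled docker0 through the placement constraint node.labels.name == docker0. To spread the replica members and config servers across your three hosts, give each node its own label and adjust node.labels.name in the corresponding services; a sketch, assuming hypothetical node hostnames node1, node2 and node3:

docker node update --label-add name=docker0 node1
docker node update --label-add name=docker1 node2
docker node update --label-add name=docker2 node3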

Deploy the services:

docker stack deploy -c mongo.yaml database --with-registry-auth
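
Once the stack is up, check that every service has started its task:

docker stack services database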

Connect to the replica set:

docker exec -it $(docker ps -qf label=com.docker.swarm.service.name=database_mongo1_replica1) mongo

Run the following commands; after rs.initiate() wait a few seconds for the node to become PRIMARY before creating the user:

use admin
rs.initiate({ _id: "replica1", members: [ { _id: 1, host: "mongo1_replica1:27017", priority: 1 } ] });
db.createUser({ user: 'admin', pwd: 'admin', roles: [ { role: 'root', db: 'admin' } ] });
db.auth({ user: 'admin', pwd: 'admin' })
rs.add({host: "mongo2_replica1:27017", priority: 1})
rs.add({host: "mongo3_replica1:27017", priority: 1})

Connect to the config server:

docker exec -it $(docker ps -qf label=com.docker.swarm.service.name=database_mongo1_config) mongo

Run the following commands; as before, wait for the node to become PRIMARY after rs.initiate() before creating the user:

use admin
rs.initiate({ _id: "config1", configsvr: true, members: [ { _id: 1, host: "mongo1_config:27017", priority: 1 } ] });
db.createUser({ user: 'admin', pwd: 'admin', roles: [ { role: 'root', db: 'admin' } ] });
db.auth({ user: 'admin', pwd: 'admin' })
rs.add({host: "mongo2_config:27017", priority: 1})
rs.add({host: "mongo3_config:27017", priority: 1})

Connect to the mongos router:

docker exec -it $(docker ps -qf label=com.docker.swarm.service.name=database_mongo1) mongo

Run the following commands:

use admin
db.auth({ user: 'admin', pwd: 'admin' })
sh.addShard("replica1/mongo1_replica1")
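
To verify that the shard was registered, print the sharding status; replica1 should appear in the shards section:

sh.status()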

Create a test database:

use test
db.createCollection('test');
sh.enableSharding('test');
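
sh.enableSharding only marks the database as shardable; a collection is distributed across shards only after you shard it on a key. A minimal sketch using a hashed _id shard key (the key choice here is an assumption, pick one that fits your data):

sh.shardCollection('test.test', { _id: 'hashed' });
sh.status();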

Installing RockMongo

Create a mongo_adminer.yaml file:

version: "3.7"

services:

  mongo_adminer:
    image: bayrell/alpine_mongo_mysql_adminer:1.0-1
    hostname: "{{.Service.Name}}.{{.Task.ID}}.local"
    environment:
      MONGO_CONFIG: >
        [
          {
            "mongo_name": "Mongo1",
            "mongo_host": "mongodb://mongo1",
            "mongo_port": "",
            "mongo_timeout": 0,
            "mongo_auth": true
          },
          {
            "mongo_name": "replicaSet1",
            "mongo_options": { "replicaSet": "replica1" },
            "mongo_host": "mongodb://mongo1_replica1,mongo2_replica1,mongo3_replica1",
            "mongo_port": "",
            "mongo_timeout": 0,
            "mongo_auth": true
          },
          {
            "mongo_name": "configServer1",
            "mongo_options": { "replicaSet": "config1" },
            "mongo_host": "mongodb://mongo1_config,mongo2_config,mongo3_config",
            "mongo_port": "",
            "mongo_timeout": 0,
            "mongo_auth": true
          }
        ]
    volumes:
      - "mongo_adminer_php:/data"
    deploy:
      replicas: 1
      endpoint_mode: dnsrr
      update_config:
        parallelism: 1
        failure_action: rollback
        delay: 5s
      restart_policy:
        condition: "on-failure"
        delay: 10s
        window: 120s
      placement:
        constraints:
          - node.labels.name == docker0
    networks:
      - cloud_backend
    logging:
      driver: journald

volumes:
  mongo_adminer_php:

networks:
  cloud_backend:
    external: true

Deploy the stack:

docker stack deploy -c mongo_adminer.yaml database --with-registry-auth
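
If the web interface does not come up, the service logs are the first place to look:

docker service logs database_mongo_adminer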

Useful commands

Initialize a replica set

rs.initiate()

Add a host to the replica set

rs.add("host:27001")

Add a host with priority 1

rs.add({host: "host:27017", priority: 1})

Remove a host from the replica set

rs.remove("host:27017")

Add an arbiter

rs.addArb("host:27017")

Change member priorities

cfg = rs.conf()
cfg.members[0].priority = 2 
cfg.members[1].priority = 3       
cfg.members[2].priority = 1 
rs.reconfig(cfg, {force : true})

The higher the priority value, the more likely that member is to be elected Primary.
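
For example, after the reconfig above you can check which member is currently Primary (re-election can take a few seconds):

db.isMaster().primary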

Useful links

  1. MongoDB Replication documentation
  2. MongoDB replication and upgrade on Ubuntu
  3. Running a MongoDB Replica Set on Docker 1.12 Swarm Mode: Step by Step
  4. Replica Set configuration
  5. Mongo docker swarm fully automated cluster