-
-
Save alexei-led/a4d31ee446a0fbcab845b93fe4a9b09d to your computer and use it in GitHub Desktop.
#!/bin/bash
# Create a local Docker Swarm cluster on Docker for Mac using
# docker-in-docker (dind) containers as worker nodes, plus a local
# registry mirror and a Swarm visualizer.
#
# Env vars:
#   NUM_WORKERS - number of dind worker nodes to create (default: 3)

# default number of workers
NUM_WORKERS=${NUM_WORKERS:-3}

# init swarm (needed for the service command) if not already created
if ! docker node ls 2>/dev/null | grep -q "Leader"; then
  docker swarm init > /dev/null 2>&1
fi

# get worker join token
SWARM_TOKEN=$(docker swarm join-token -q worker)

# get Swarm master IP (Docker for Mac xhyve VM IP)
SWARM_MASTER=$(docker info --format "{{.Swarm.NodeAddr}}")
echo "Swarm master IP: ${SWARM_MASTER}"
sleep 10

# start a Docker registry mirror so workers pull images through it
# instead of hitting Docker Hub directly
docker run -d --restart=always -p 4000:5000 --name v2_mirror \
  -v "$PWD/rdata":/var/lib/registry \
  -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \
  registry:2.5

# run NUM_WORKERS workers joined with SWARM_TOKEN
for i in $(seq "${NUM_WORKERS}"); do
  # remove node from cluster if it exists
  docker node rm --force "$(docker node ls --filter "name=worker-${i}" -q)" > /dev/null 2>&1
  # remove worker container with the same name if it exists
  docker rm --force "$(docker ps -q --filter "name=worker-${i}")" > /dev/null 2>&1
  # run new worker container; host ports are prefixed with the worker index
  # (e.g. worker 1 -> 12375, worker 2 -> 22375, ...)
  docker run -d --privileged --name "worker-${i}" --hostname="worker-${i}" \
    -p "${i}2375:2375" \
    -p "${i}5000:5000" \
    -p "${i}5001:5001" \
    -p "${i}5601:5601" \
    docker:1.13-rc-dind --registry-mirror "http://${SWARM_MASTER}:4000"
  # add worker container to the cluster
  docker --host="localhost:${i}2375" swarm join --token "${SWARM_TOKEN}" "${SWARM_MASTER}:2377"
done

# show swarm cluster
printf "\nLocal Swarm Cluster\n===================\n"
docker node ls

# run swarm visualizer
printf "\nLocal Swarm Visualizer\n===================\n"
docker run -it -d --name swarm_visualizer \
  -p 8000:8080 -e HOST=localhost \
  -v /var/run/docker.sock:/var/run/docker.sock \
  manomarks/visualizer:beta
#!/bin/bash
# Tear down the local Swarm cluster created by the companion setup script:
# each worker leaves the swarm and its container is removed, then the
# master, the registry mirror and the visualizer are removed.
#
# Env vars:
#   NUM_WORKERS - number of dind worker nodes to remove (default: 3)

NUM_WORKERS=${NUM_WORKERS:-3}

# make each worker leave the swarm, then remove its container
for i in $(seq "${NUM_WORKERS}"); do
  docker --host "localhost:${i}2375" swarm leave > /dev/null 2>&1
  docker rm --force "worker-${i}" > /dev/null 2>&1
done

# remove swarm cluster master
docker swarm leave --force > /dev/null 2>&1

# remove docker registry mirror
docker rm --force v2_mirror > /dev/null 2>&1

# remove swarm visualizer
docker rm --force swarm_visualizer > /dev/null 2>&1
version: "3"
services:
  # Redis queue that buffers incoming votes; pinned to the manager node.
  redis:
    image: redis:3.2-alpine
    ports:
      - "6379"
    networks:
      - voteapp
    deploy:
      placement:
        constraints: [node.role == manager]
  # Postgres database holding the tallied results; pinned to the manager node.
  db:
    image: postgres:9.4
    volumes:
      - db-data:/var/lib/postgresql/data
    networks:
      - voteapp
    deploy:
      placement:
        constraints: [node.role == manager]
  # Web frontend where users cast votes; writes to redis.
  voting-app:
    image: gaiadocker/example-voting-app-vote:good
    ports:
      # quoted to avoid YAML misparsing of colon-separated port mappings
      - "5000:80"
    networks:
      - voteapp
    depends_on:
      - redis
    deploy:
      mode: replicated
      replicas: 2
      labels: [APP=VOTING]
      placement:
        constraints: [node.role == worker]
  # Web frontend displaying the results; reads from db.
  result-app:
    image: gaiadocker/example-voting-app-result:latest
    ports:
      # quoted to avoid YAML misparsing of colon-separated port mappings
      - "5001:80"
    networks:
      - voteapp
    depends_on:
      - db
  # Background worker that moves votes from redis into db.
  worker:
    image: gaiadocker/example-voting-app-worker:latest
    networks:
      voteapp:
        aliases:
          - workers
    depends_on:
      - db
      - redis
    # service deployment
    deploy:
      mode: replicated
      replicas: 2
      labels: [APP=VOTING]
      # service resource management
      resources:
        # Hard limit - Docker does not allow to allocate more
        limits:
          cpus: '0.25'
          memory: 512M
        # Soft limit - Docker makes best effort to return to it
        reservations:
          cpus: '0.25'
          memory: 256M
      # service restart policy
      restart_policy:
        condition: on-failure
        delay: 5s
        max_attempts: 3
        window: 120s
      # service update configuration
      update_config:
        parallelism: 1
        delay: 10s
        failure_action: continue
        monitor: 60s
        max_failure_ratio: 0.3
      # placement constraint - in this case on 'worker' nodes only
      placement:
        constraints: [node.role == worker]
networks:
  voteapp:
volumes:
  db-data:
@alexei-led I assume this is multi-service? Did it also work with multiple hosts? I'm curious what it would need to make it multi-host.
@katopz Since the docker-compose.yml file version is 3 I figure you can use it in swarm mode
How do I deploy a service on a worker node? If anyone knows, please help: https://stackoverflow.com/questions/51999786/docker-swarm-deploy-containers-on-worker-node
`# docker deploy --compose-file docker-compose.yml VOTE
docker: 'deploy' is not a docker command.
See 'docker --help'
Client: Docker Engine - Community
Version: 20.10.3
API version: 1.41
Go version: go1.13.15
Git commit: 48d30b5
Built: Fri Jan 29 14:33:13 2021
OS/Arch: linux/amd64
Context: default
Experimental: true
Server: Docker Engine - Community
Engine:
Version: 20.10.3
API version: 1.41 (minimum version 1.12)
Go version: go1.13.15
Git commit: 46229ca
Built: Fri Jan 29 14:31:25 2021
OS/Arch: linux/amd64
Experimental: true
containerd:
Version: 1.4.3
GitCommit: 269548fa27e0089a8b8278fc4fc781d7f65a939b
runc:
Version: 1.0.0-rc92
GitCommit: ff819c7e9184c13b7c2607fe6c30ae19403a7aff
docker-init:
Version: 0.19.0
GitCommit: de40ad0
`
Did you initiate docker swarm?
Deploy
Create new swarm cluster with Docker for Mac and docker-in-docker images:
Deploy sample Voting app (5 services) to the Swarm cluster, created above:
Cleanup
Remove the VOTE application from the Swarm cluster:

Destroy the Swarm cluster: