rook - single node
# kubeadm needs bridged traffic visible to iptables (br_netfilter) and swap off
sudo modprobe br_netfilter
sudo swapoff -a
sudo tee /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sudo sysctl --system
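# Optional sanity check (not in the original gist): confirm the bridge sysctls
# were actually applied before continuing.
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables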
sudo apt-get update
sudo apt-get install -y --no-install-recommends docker.io apt-transport-https curl
# Quoted heredoc delimiter so $MAINPID is written literally instead of being
# expanded by the shell.
sudo tee /etc/systemd/system/docker.service <<'EOF'
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
BindsTo=containerd.service
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket

[Service]
Type=notify
# The stock unit does not use systemd for cgroups because the delegate issues
# still exist and systemd does not yet support the full cgroup feature set
# required for containers run by docker; ExecStart below overrides this with
# native.cgroupdriver=systemd so docker matches the kubelet's cgroup driver.
ExecStart=/usr/bin/dockerd --exec-opt native.cgroupdriver=systemd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old and new locations are accepted by systemd 229 and up, so the old
# location is used here to work with either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old and new names are accepted by systemd 230 and up, so the old name
# is used here to work with either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
# Set delegate yes so that systemd does not reset the cgroups of docker containers.
Delegate=yes
# Kill only the docker process, not all processes in the cgroup.
KillMode=process

[Install]
WantedBy=multi-user.target
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
sudo systemctl enable --now docker
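# Optional sanity check (not in the original gist): docker should now report
# the systemd cgroup driver, matching the kubelet config written further down.
sudo docker info 2>/dev/null | grep -i 'cgroup driver'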
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo tee /etc/apt/sources.list.d/kubernetes.list <<EOF
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
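# Optional sanity check (not in the original gist): confirm the tools are
# installed and the packages are pinned against upgrades.
kubeadm version -o short
apt-mark showhold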
sudo tee /var/lib/kubelet/config.yaml <<EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
EOF
sudo systemctl daemon-reload
sudo systemctl restart kubelet
sudo kubeadm init --pod-network-cidr 172.18.0.0/24
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# Note: the stock Calico manifest defaults its IP pool to 192.168.0.0/16, so you
# may need to set CALICO_IPV4POOL_CIDR in calico.yaml to match the pod CIDR above.
kubectl apply -f https://docs.projectcalico.org/v3.14/manifests/calico.yaml
kubectl taint nodes --all node-role.kubernetes.io/master-
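# Optional sanity check (not in the original gist): wait for the node to go
# Ready once Calico is up, then confirm the master taint is gone.
kubectl wait --for=condition=Ready node --all --timeout=300s
kubectl get nodes -o wide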
git clone --single-branch --branch release-1.3 https://github.com/rook/rook.git
cd rook/cluster/examples/kubernetes/ceph
kubectl create -f common.yaml
kubectl create -f operator.yaml
kubectl create -f cluster-test.yaml
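# Optional sanity check (not in the original gist): wait for the operator pod
# (label from the stock operator.yaml) and eyeball the mon/mgr/osd pods before
# deploying the toolbox.
kubectl -n rook-ceph wait --for=condition=Ready pod -l app=rook-ceph-operator --timeout=300s
kubectl -n rook-ceph get pods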
tee /tmp/toolbox.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-tools
  namespace: rook-ceph
  labels:
    app: rook-ceph-tools
spec:
  replicas: 1
  selector:
    matchLabels:
      app: rook-ceph-tools
  template:
    metadata:
      labels:
        app: rook-ceph-tools
    spec:
      dnsPolicy: ClusterFirstWithHostNet
      containers:
      - name: rook-ceph-tools
        image: rook/ceph:v1.3.4
        command: ["/tini"]
        args: ["-g", "--", "/usr/local/bin/toolbox.sh"]
        imagePullPolicy: IfNotPresent
        env:
          - name: ROOK_ADMIN_SECRET
            valueFrom:
              secretKeyRef:
                name: rook-ceph-mon
                key: admin-secret
        volumeMounts:
          - mountPath: /etc/ceph
            name: ceph-config
          - name: mon-endpoint-volume
            mountPath: /etc/rook
      volumes:
        - name: mon-endpoint-volume
          configMap:
            name: rook-ceph-mon-endpoints
            items:
              - key: data
                path: mon-endpoints
        - name: ceph-config
          emptyDir: {}
      tolerations:
        - key: "node.kubernetes.io/unreachable"
          operator: "Exists"
          effect: "NoExecute"
          tolerationSeconds: 5
EOF
kubectl create -f /tmp/toolbox.yaml
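# Optional sanity check (not in the original gist): run `ceph status` through
# the toolbox once its pod is Ready; a fresh single-node cluster may report
# HEALTH_WARN rather than HEALTH_OK.
kubectl -n rook-ceph wait --for=condition=Ready pod -l app=rook-ceph-tools --timeout=300s
kubectl -n rook-ceph exec -it $(kubectl -n rook-ceph get pod -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}') -- ceph status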
# ./csi/rbd/storageclass.yaml
tee /tmp/storageclass.yaml <<'EOF'
apiVersion: ceph.rook.io/v1
kind: CephBlockPool
metadata:
  name: replicapool
  namespace: rook-ceph
spec:
  #failureDomain: host
  failureDomain: osd
  replicated:
    size: 3
    # Disallow setting pool with replica 1, this could lead to data loss without recovery.
    # Make sure you're *ABSOLUTELY CERTAIN* that is what you want
    requireSafeReplicaSize: true
    # gives a hint (%) to Ceph in terms of expected consumption of the total cluster capacity of a given pool
    # for more info: https://docs.ceph.com/docs/master/rados/operations/placement-groups/#specifying-expected-pool-size
    #targetSizeRatio: .5
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-ceph-block
provisioner: rook-ceph.rbd.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running.
  # If you change this namespace, also change the namespace below where the secret namespaces are defined.
  clusterID: rook-ceph
  # If you want to use an erasure coded pool with RBD, you need to create
  # two pools: one erasure coded and one replicated.
  # You need to specify the replicated pool here in the `pool` parameter; it is
  # used for the metadata of the images.
  # The erasure coded pool must be set as the `dataPool` parameter below.
  #dataPool: ec-data-pool
  pool: replicapool
  # RBD image format. Defaults to "2".
  imageFormat: "2"
  # RBD image features. Available for imageFormat: "2". CSI RBD currently supports only the `layering` feature.
  imageFeatures: layering
  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-rbd-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-rbd-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph
  # Specify the filesystem type of the volume. If not specified, csi-provisioner
  # will set the default as `ext4`.
  csi.storage.k8s.io/fstype: ext4
  # Uncomment the following to use rbd-nbd as the mounter on supported nodes.
  # **IMPORTANT**: If you are using rbd-nbd as the mounter, during upgrade you will hit a ceph-csi
  # issue that causes the mount to be disconnected. You will need to follow special upgrade steps
  # to restart your application pods. Therefore, this option is not recommended.
  #mounter: rbd-nbd
allowVolumeExpansion: true
reclaimPolicy: Delete
EOF
kubectl create -f /tmp/storageclass.yaml
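# Optional sanity check (not in the original gist): create a throwaway PVC
# against the new StorageClass and confirm it binds; the claim name and size
# here are arbitrary.
tee /tmp/test-pvc.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-test
spec:
  storageClassName: rook-ceph-block
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
kubectl create -f /tmp/test-pvc.yaml
kubectl get pvc rbd-test
kubectl delete -f /tmp/test-pvc.yaml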
# Both example manifests live one directory up, in cluster/examples/kubernetes/.
kubectl create -f ../mysql.yaml
kubectl create -f ../wordpress.yaml
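# Optional sanity check (not in the original gist): both example apps claim an
# RBD volume; the PVCs should bind and the pods should reach Running.
kubectl get pvc
kubectl get pods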