diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c19e2b9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,26 @@
+_build/
+/AUTHORS
+/ChangeLog
+/dist/
+/otcdocstheme.egg-info/
+.DS_Store
+*.pyc
+doc/build
+doc/source/BBresult
+api-ref/build
+.tox
+/.venv
+*.swp
+*.log
+*.egg*
+
+# Editors
+*~
+
+# Files created by releasenotes build
+releasenotes/build
+
+# Files created by the LaTeX/PDF build
+/.aux
+/.fdb_latexmk
+/texput.fls
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..1a96af9
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,32 @@
+default_language_version:
+ python: python3
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: 9136088a246768144165fcc3ecc3d31bb686920a # v3.3.0
+ hooks:
+ - id: trailing-whitespace
+ # Replaces or checks mixed line ending
+ - id: mixed-line-ending
+ args: ['--fix', 'lf']
+ exclude: '.*\.(svg)$'
+ # Forbid files which have a UTF-8 byte-order marker
+ - id: check-byte-order-marker
+ # Checks that non-binary executables have a proper shebang
+ - id: check-executables-have-shebangs
+ # Check for files that contain merge conflict strings.
+ - id: check-merge-conflict
+ # Check for debugger imports and py37+ breakpoint()
+ # calls in python source
+ - id: debug-statements
+ - id: check-yaml
+ files: .*\.(yaml|yml)$
+ - repo: local
+ hooks:
+ - id: flake8
+ name: flake8
+ additional_dependencies:
+ - hacking>=3.0.1,<3.1.0
+ language: python
+ entry: flake8
+ files: '^.*\.py$'
+ exclude: '^(doc|releasenotes|tools)/.*$'
diff --git a/.zuul.yaml b/.zuul.yaml
new file mode 100644
index 0000000..bc0e80a
--- /dev/null
+++ b/.zuul.yaml
@@ -0,0 +1,5 @@
+- project:
+ merge-mode: squash-merge
+ default-branch: main
+ templates:
+ - publish-otc-docs-hc-pti
diff --git a/bindep.txt b/bindep.txt
new file mode 100644
index 0000000..5bee0c6
--- /dev/null
+++ b/bindep.txt
@@ -0,0 +1,5 @@
+libffi-dev [platform:dpkg]
+libffi-devel [platform:rpm]
+libssl-dev [platform:dpkg]
+openssl-devel [platform:rpm]
+graphviz [doc]
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000..0528130
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,12 @@
+# The order of packages is significant, because pip processes them in the order
+# of appearance. Changing the order has an impact on the overall integration
+# process, which may cause wedges in the gate later.
+# Notes:
+# reno needs openstackdocstheme which needs reno (cycle dep).
+# os-api-ref needs openstackdocstheme which needs os-api-ref (cycle dep).
+# Put them in here will make it clear that those are only needed for
+# docs.
+
+sphinx==4.0.0
+otcdocstheme
+reno>=3.1.0 # Apache-2.0
diff --git a/doc/source/_static/css/.placeholder b/doc/source/_static/css/.placeholder
new file mode 100644
index 0000000..e69de29
diff --git a/doc/source/_static/css/default.css b/doc/source/_static/css/default.css
new file mode 100644
index 0000000..e2c3119
--- /dev/null
+++ b/doc/source/_static/css/default.css
@@ -0,0 +1,7 @@
+.literal-block-wrapper {
+ padding: 0 !important;
+}
+
+.highlight .nv {
+ color: var(--dt-color-text-standard) !important;
+}
diff --git a/doc/source/_static/favicon.ico b/doc/source/_static/favicon.ico
new file mode 100644
index 0000000..cf837fc
Binary files /dev/null and b/doc/source/_static/favicon.ico differ
diff --git a/doc/source/_static/images/ansible.svg b/doc/source/_static/images/ansible.svg
new file mode 100644
index 0000000..b8196d5
--- /dev/null
+++ b/doc/source/_static/images/ansible.svg
@@ -0,0 +1,10 @@
+
+
+
\ No newline at end of file
diff --git a/doc/source/_static/images/docker.svg b/doc/source/_static/images/docker.svg
new file mode 100644
index 0000000..a2473ad
--- /dev/null
+++ b/doc/source/_static/images/docker.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/doc/source/_static/images/golang.svg b/doc/source/_static/images/golang.svg
new file mode 100644
index 0000000..cab722f
--- /dev/null
+++ b/doc/source/_static/images/golang.svg
@@ -0,0 +1,45 @@
+
diff --git a/doc/source/_static/images/javascript.svg b/doc/source/_static/images/javascript.svg
new file mode 100644
index 0000000..b1428d6
--- /dev/null
+++ b/doc/source/_static/images/javascript.svg
@@ -0,0 +1,54 @@
+
+
diff --git a/doc/source/_static/images/k8_pod.png b/doc/source/_static/images/k8_pod.png
new file mode 100644
index 0000000..e498a9a
Binary files /dev/null and b/doc/source/_static/images/k8_pod.png differ
diff --git a/doc/source/_static/images/k8_svc.png b/doc/source/_static/images/k8_svc.png
new file mode 100644
index 0000000..8cca480
Binary files /dev/null and b/doc/source/_static/images/k8_svc.png differ
diff --git a/doc/source/_static/images/otc.png b/doc/source/_static/images/otc.png
new file mode 100644
index 0000000..8ec4115
Binary files /dev/null and b/doc/source/_static/images/otc.png differ
diff --git a/doc/source/_static/images/python.svg b/doc/source/_static/images/python.svg
new file mode 100644
index 0000000..366f52f
--- /dev/null
+++ b/doc/source/_static/images/python.svg
@@ -0,0 +1,113 @@
+
+
+
diff --git a/doc/source/_static/images/rancher.svg b/doc/source/_static/images/rancher.svg
new file mode 100644
index 0000000..e583873
--- /dev/null
+++ b/doc/source/_static/images/rancher.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/doc/source/_static/images/terraform.svg b/doc/source/_static/images/terraform.svg
new file mode 100644
index 0000000..4f76c26
--- /dev/null
+++ b/doc/source/_static/images/terraform.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/doc/source/_static/images/users.png b/doc/source/_static/images/users.png
new file mode 100644
index 0000000..5cb409b
Binary files /dev/null and b/doc/source/_static/images/users.png differ
diff --git a/doc/source/_static/images/vault.png b/doc/source/_static/images/vault.png
new file mode 100644
index 0000000..cd36e58
Binary files /dev/null and b/doc/source/_static/images/vault.png differ
diff --git a/doc/source/_static/images/zookeeper.png b/doc/source/_static/images/zookeeper.png
new file mode 100644
index 0000000..16e0604
Binary files /dev/null and b/doc/source/_static/images/zookeeper.png differ
diff --git a/doc/source/cce_vault.rst b/doc/source/cce_vault.rst
new file mode 100644
index 0000000..ae56308
--- /dev/null
+++ b/doc/source/cce_vault.rst
@@ -0,0 +1,618 @@
+===============================================
+Secrets management with CCE and Hashicorp Vault
+===============================================
+
+Most modern IT setups are composed of several subsystems like databases, object
+stores, master controller, node access, and more. To access one component from
+another, some form of credentials are required. Configuring and storing these
+secrets directly in the components is considered as an antipattern, since a
+vulnerability of one component may iteratively affect the security of the whole
+setup.
+
+With centralized secret management it becomes unnecessary to keep secrets used
+by various applications spread across DevOps environments. This helps to close
+some security attack vectors (like `secret sprawl
+`_,
+`security islands `_), but
+usually introduces a problem of the so-called `Secret Zero
+`_
+as a key to the key storage.
+
+Vault is an open-source software, provided and maintained by Hashicorp, that
+addresses this very problem. It is considered one of the reference solutions
+for it. This article demonstrates how to utilize infrastructure authorization
+with Hashicorp Vault in a CCE-powered setup. As an example workload, we deploy
+a Zookeeper cluster with enabled TLS protection. Certificates for Zookeeper are
+stored in Vault, and they oblige required practices like rotations or audits.
+Zookeeper can easily be replaced by any other component that requires access to
+internal credentials.
+
+Overview
+========
+
+.. graphviz:: dot/cce_vault_overview.dot
+ :layout: dot
+
+TLS secrets are kept in Vault. They are read by the Vault Agent component
+running as a sidecar in the Zookeeper service pod, which writes certificates
+onto the file system. The Zookeeper service reads the certificates populated by
+the Agent. Vault Agent is configured for password-less access to Vault. Further in the
+document it is explained how exactly this is implemented.
+
+Establishing trust between CCE and Vault
+========================================
+
+Before any application managed by CCE is able to log in to Vault relying on
+infrastructure-based authentication, some steps are required on the
+Vault side. The Kubernetes auth plugin is enabled and configured to only accept
+requests from a specific Kubernetes cluster by providing its Certificate
+Authority. To allow several different CCE clusters to use Vault, a
+dedicated auth path is going to be used.
+
+.. code-block:: shell
+
+ $ vault auth enable -path kubernetes_cce1 kubernetes
+ $ vault write auth/kubernetes_cce1/config \
+ kubernetes_host="$K8S_HOST" \
+ kubernetes_ca_cert="$SA_CA_CRT"
+
+Since in our example a dedicated service account token is being
+periodically rotated, using the `client JWT as reviewer JWT
+`_
+approach is possible.
+
+Access rules for Vault
+======================
+
+With the auth plugin enabled as described above, CCE workloads are able to
+authenticate to Vault, but they can do nothing yet. It is now necessary to
+establish a further level of authorization and let particular CCE service
+accounts access secrets in Vault.
+
+For the scope of the use case, we grant the Zookeeper service account from its
+namespace access to the TLS secrets stored in Vault's key-value store. For that
+a policy providing read-only access to the /tls/zk* and /tls/ca paths is
+created.
+
+.. code-block:: shell
+
+ $ vault policy write tls-zk-ro - <`_
+
+* Use `PKI secrets engine `_ to
+ issue certificates
+
+Vault enables users not only to store TLS certificate data in the key-value store,
+but also to create and revoke them. To keep this tutorial simple enough we are
+not going to do this and just upload generated certificates into the KV store.
+For production setups this example can be easily extended with extra actions.
+
+.. code-block:: shell
+
+ $ vault kv put secret/tls/ca certificate=@ca.crt
+ $ vault kv put secret/tls/zk_server certificate=@zk_server.crt private_key=@zk_server.key
+ $ vault kv put secret/tls/zk_client certificate=@zk_client.crt private_key=@zk_client.key
+
+Certificate paths and property names used here are referenced by the Zookeeper installation.
+
+Deploying Zookeeper
+===================
+
+Now that the secrets are stored safely in Vault and only allowed applications
+can fetch them it is time to look how exactly the application accesses the
+secrets. Generally, utilizing Vault requires modification of the application.
+`Vault agent `_ is a tool that was
+created to simplify secrets delivery for applications when it is hard or
+undesirable to change the application itself. The Agent takes care of reading
+secrets from Vault and can deliver them to the file system.
+
+There are many ways to properly implement a Zookeeper service on
+Kubernetes. The scope of this blueprint is not Zookeeper itself, but
+demonstrating how an application can be supplied with required certificates. The
+reference architecture described here is based on the best practices gathered from
+various sources and extended by HashiCorp Vault. It overrides default Zookeeper
+start scripts in order to allow better control of the runtime settings and
+properly fill all required configuration options for TLS to work. Other methods
+of deploying Zookeeper can be easily used here instead.
+
+1. Create a Kubernetes namespace named `zookeeper`.
+
+.. code-block:: shell
+
+ $ kubectl create namespace zookeeper
+
+2. Create a Kubernetes service account named `zookeeper`.
+
+.. code-block:: shell
+
+ $ kubectl create serviceaccount zookeeper
+
+3. In Kubernetes a *service account* provides an identity for the services
+ running in the pod so that the process can access Kubernetes API. The same
+   identity can be used to access Vault, but requires one special permission -
+   access to the tokenreview API of Kubernetes. When instead a dedicated
+ reviewer JWT is used, this step is not necessary, but it also means
+ long-living sensitive data is used and frequently transferred over the
+ network. More details on various ways to use Kubernetes tokens to authorize
+ to Vault `can be found here
+ `_.
+
+.. code-block:: shell
+
+ $ kubectl create clusterrolebinding vault-client-auth-delegator \
+ --clusterrole=system:auth-delegator \
+ --serviceaccount=zookeeper:zookeeper
+
+4. Create a Kubernetes ConfigMap with all required configurations. One possible
+ approach is to define dedicated health and readiness check scripts and to
+ override automatically created Zookeeper start script. This is especially
+ useful when TLS protection is enabled, but default container scripts do not
+ support this.
+
+.. code-block:: yaml
+ :caption: zookeeper-cm.yaml
+
+ ---
+ apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: zookeeper-config
+ namespace: "zookeeper"
+ data:
+ ok: |
+ #!/bin/sh
+      # This script is used by the liveness check of the Kubernetes pod
+ if [ -f /tls/ca.pem ]; then
+ echo "srvr" | openssl s_client -CAfile /tls/ca.pem -cert /tls/client/tls.crt \
+ -key /tls/client/tls.key -connect 127.0.0.1:${1:-2281} -quiet -ign_eof 2>/dev/null | grep Mode
+
+ else
+ zkServer.sh status
+ fi
+
+ ready: |
+ #!/bin/sh
+      # This script is used by the readiness check of the Kubernetes pod
+ if [ -f /tls/ca.pem ]; then
+ echo "ruok" | openssl s_client -CAfile /tls/ca.pem -cert /tls/client/tls.crt \
+ -key /tls/client/tls.key -connect 127.0.0.1:${1:-2281} -quiet -ign_eof 2>/dev/null
+ else
+ echo ruok | nc 127.0.0.1 ${1:-2181}
+ fi
+
+ run: |
+ #!/bin/bash
+ # This is the main starting script
+ set -a
+ ROOT=$(echo /apache-zookeeper-*)
+ ZK_USER=${ZK_USER:-"zookeeper"}
+ ZK_LOG_LEVEL=${ZK_LOG_LEVEL:-"INFO"}
+ ZK_DATA_DIR=${ZK_DATA_DIR:-"/data"}
+ ZK_DATA_LOG_DIR=${ZK_DATA_LOG_DIR:-"/data/log"}
+ ZK_CONF_DIR=${ZK_CONF_DIR:-"/conf"}
+ ZK_CLIENT_PORT=${ZK_CLIENT_PORT:-2181}
+ ZK_SSL_CLIENT_PORT=${ZK_SSL_CLIENT_PORT:-2281}
+ ZK_SERVER_PORT=${ZK_SERVER_PORT:-2888}
+ ZK_ELECTION_PORT=${ZK_ELECTION_PORT:-3888}
+ ID_FILE="$ZK_DATA_DIR/myid"
+ ZK_CONFIG_FILE="$ZK_CONF_DIR/zoo.cfg"
+ LOG4J_PROPERTIES="$ZK_CONF_DIR/log4j.properties"
+ HOST=$(hostname)
+ DOMAIN=`hostname -d`
+ APPJAR=$(echo $ROOT/*jar)
+ CLASSPATH="${ROOT}/lib/*:${APPJAR}:${ZK_CONF_DIR}:"
+ if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then
+ NAME=${BASH_REMATCH[1]}
+ ORD=${BASH_REMATCH[2]}
+ MY_ID=$((ORD+1))
+ else
+ echo "Failed to extract ordinal from hostname $HOST"
+ exit 1
+ fi
+ mkdir -p $ZK_DATA_DIR
+ mkdir -p $ZK_DATA_LOG_DIR
+ echo $MY_ID >> $ID_FILE
+
+ echo "dataDir=$ZK_DATA_DIR" >> $ZK_CONFIG_FILE
+ echo "dataLogDir=$ZK_DATA_LOG_DIR" >> $ZK_CONFIG_FILE
+ echo "4lw.commands.whitelist=*" >> $ZK_CONFIG_FILE
+ # Client TLS configuration
+ if [[ -f /tls/ca.pem ]]; then
+ echo "secureClientPort=$ZK_SSL_CLIENT_PORT" >> $ZK_CONFIG_FILE
+ echo "ssl.keyStore.location=/tls/client/client.pem" >> $ZK_CONFIG_FILE
+ echo "ssl.trustStore.location=/tls/ca.pem" >> $ZK_CONFIG_FILE
+ else
+ echo "clientPort=$ZK_CLIENT_PORT" >> $ZK_CONFIG_FILE
+ fi
+ # Server TLS configuration
+ if [[ -f /tls/ca.pem ]]; then
+ echo "serverCnxnFactory=org.apache.zookeeper.server.NettyServerCnxnFactory" >> $ZK_CONFIG_FILE
+ echo "sslQuorum=true" >> $ZK_CONFIG_FILE
+ echo "ssl.quorum.keyStore.location=/tls/server/server.pem" >> $ZK_CONFIG_FILE
+ echo "ssl.quorum.trustStore.location=/tls/ca.pem" >> $ZK_CONFIG_FILE
+ fi
+ for (( i=1; i<=$ZK_REPLICAS; i++ ))
+ do
+ echo "server.$i=$NAME-$((i-1)).$DOMAIN:$ZK_SERVER_PORT:$ZK_ELECTION_PORT" >> $ZK_CONFIG_FILE
+ done
+ rm -f $LOG4J_PROPERTIES
+ echo "zookeeper.root.logger=$ZK_LOG_LEVEL, CONSOLE" >> $LOG4J_PROPERTIES
+ echo "zookeeper.console.threshold=$ZK_LOG_LEVEL" >> $LOG4J_PROPERTIES
+ echo "zookeeper.log.threshold=$ZK_LOG_LEVEL" >> $LOG4J_PROPERTIES
+ echo "zookeeper.log.dir=$ZK_DATA_LOG_DIR" >> $LOG4J_PROPERTIES
+ echo "zookeeper.log.file=zookeeper.log" >> $LOG4J_PROPERTIES
+ echo "zookeeper.log.maxfilesize=256MB" >> $LOG4J_PROPERTIES
+ echo "zookeeper.log.maxbackupindex=10" >> $LOG4J_PROPERTIES
+ echo "zookeeper.tracelog.dir=$ZK_DATA_LOG_DIR" >> $LOG4J_PROPERTIES
+ echo "zookeeper.tracelog.file=zookeeper_trace.log" >> $LOG4J_PROPERTIES
+ echo "log4j.rootLogger=\${zookeeper.root.logger}" >> $LOG4J_PROPERTIES
+ echo "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender" >> $LOG4J_PROPERTIES
+ echo "log4j.appender.CONSOLE.Threshold=\${zookeeper.console.threshold}" >> $LOG4J_PROPERTIES
+ echo "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout" >> $LOG4J_PROPERTIES
+ echo "log4j.appender.CONSOLE.layout.ConversionPattern=\
+ %d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n" >> $LOG4J_PROPERTIES
+ if [ -n "$JMXDISABLE" ]
+ then
+ MAIN=org.apache.zookeeper.server.quorum.QuorumPeerMain
+ else
+ MAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMXPORT \
+ -Dcom.sun.management.jmxremote.authenticate=$JMXAUTH \
+ -Dcom.sun.management.jmxremote.ssl=$JMXSSL \
+ -Dzookeeper.jmx.log4j.disable=$JMXLOG4J \
+ org.apache.zookeeper.server.quorum.QuorumPeerMain"
+ fi
+ set -x
+ exec java -cp "$CLASSPATH" $JVMFLAGS $MAIN $ZK_CONFIG_FILE
+
+ vault-agent-config.hcl: |
+ exit_after_auth = true
+ pid_file = "/home/vault/pidfile"
+ auto_auth {
+ method "kubernetes" {
+ mount_path = "auth/kubernetes_cce1"
+ config = {
+ role = "zookeeper"
+ token_path = "/run/secrets/tokens/vault-token"
+ }
+ }
+ sink "file" {
+ config = {
+ path = "/home/vault/.vault-token"
+ }
+ }
+ }
+
+ cache {
+ use_auto_auth_token = true
+ }
+
+    # ZK is nit-picky about cert file extensions
+ template {
+ destination = "/tls/ca.pem"
+ contents = <` with the address of the
+   Vault server. This includes a pod with a Vault Agent sidecar container as an init
+   container, a Vault Agent sidecar container used continuously in the run cycle of
+   the pod, and the Zookeeper main container.
+
+.. code-block:: yaml
+ :caption: zookeeper-ss.yaml
+
+ apiVersion: apps/v1
+ kind: StatefulSet
+ spec:
+ podManagementPolicy: Parallel
+ replicas: 3
+ selector:
+ matchLabels:
+ app: zookeeper
+ component: server
+ serviceName: zookeeper-headless
+ template:
+ metadata:
+ labels:
+ app: zookeeper
+ component: server
+ spec:
+ containers:
+
+ - args:
+ - agent
+ - -config=/etc/vault/vault-agent-config.hcl
+ - -log-level=debug
+ - -exit-after-auth=false
+ env:
+ - name: VAULT_ADDR
+ value:
+ image: vault:1.9.0
+ name: vault-agent-sidecar
+ volumeMounts:
+ - mountPath: /etc/vault
+ name: vault-agent-config
+ - mountPath: /tls
+ name: cert-data
+ - mountPath: /var/run/secrets/tokens
+ name: k8-tokens
+
+ - command:
+ - /bin/bash
+ - -xec
+ - /config-scripts/run
+ env:
+ - name: ZK_REPLICAS
+ value: "3"
+ - name: ZOO_PORT
+ value: "2181"
+ - name: ZOO_STANDALONE_ENABLED
+ value: "false"
+ - name: ZOO_TICK_TIME
+ value: "2000"
+ image: zookeeper:3.7.0
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - /config-scripts/ok
+ failureThreshold: 2
+ initialDelaySeconds: 20
+ periodSeconds: 30
+ successThreshold: 1
+ timeoutSeconds: 5
+ name: zookeeper
+ ports:
+ - containerPort: 2281
+ name: client
+ protocol: TCP
+ - containerPort: 2888
+ name: server
+ protocol: TCP
+ - containerPort: 3888
+ name: election
+ protocol: TCP
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - /config-scripts/ready
+ failureThreshold: 2
+ initialDelaySeconds: 20
+ periodSeconds: 30
+ successThreshold: 1
+ timeoutSeconds: 5
+ securityContext:
+ runAsUser: 1000
+ volumeMounts:
+ - mountPath: /data
+ name: datadir
+ - mountPath: /tls
+ name: cert-data
+ - mountPath: /config-scripts
+ name: zookeeper-config
+ dnsPolicy: ClusterFirst
+
+ initContainers:
+ - args:
+ - agent
+ - -config=/etc/vault/vault-agent-config.hcl
+ - -log-level=debug
+ - -exit-after-auth=true
+ env:
+ - name: VAULT_ADDR
+ value:
+ image: vault:1.9.0
+ name: vault-agent
+ volumeMounts:
+ - mountPath: /etc/vault
+ name: vault-agent-config
+ - mountPath: /tls
+ name: cert-data
+ - mountPath: /var/run/secrets/tokens
+ name: k8-tokens
+ restartPolicy: Always
+ serviceAccount: zookeeper
+ serviceAccountName: zookeeper
+ terminationGracePeriodSeconds: 1800
+ volumes:
+ - configMap:
+ defaultMode: 420
+ items:
+ - key: vault-agent-config.hcl
+ path: vault-agent-config.hcl
+ name: zookeeper-config
+ name: vault-agent-config
+ - configMap:
+ defaultMode: 365
+ name: zookeeper-config
+ name: zookeeper-config
+ - emptyDir: {}
+ name: cert-data
+ - name: k8-tokens
+ projected:
+ defaultMode: 420
+ sources:
+ - serviceAccountToken:
+ expirationSeconds: 7200
+ path: vault-token
+
+ updateStrategy:
+ type: RollingUpdate
+ volumeClaimTemplates:
+ - apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: datadir
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 5Gi
+ storageClassName: csi-disk
+ volumeMode: Filesystem
+
+.. code-block:: bash
+
+ $ kubectl apply -f zookeeper-ss.yaml
+
+With this, a production-ready Zookeeper service with TLS enabled has been
+deployed successfully to the CCE. The Vault Agent takes care of authorizing to
+HashiCorp Vault using a Kubernetes service account with a short time to live
+token and fetches required secrets to the file system. In the entire Kubernetes
+deployment there are no secrets for the application, neither the key to the
+Vault, nor TLS certificates themselves. Not even using Kubernetes secrets is
+necessary.
+
+References
+==========
+
+* https://learn.hashicorp.com/tutorials/vault/agent-kubernetes?in=vault/app-integration
+
+* https://learn.hashicorp.com/tutorials/vault/agent-kubernetes?in=vault/auth-methods
+
+* https://www.vaultproject.io/docs/auth/kubernetes
diff --git a/doc/source/conf.py b/doc/source/conf.py
new file mode 100644
index 0000000..0f8819a
--- /dev/null
+++ b/doc/source/conf.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+#
+# -- General configuration ------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+
+extensions = [
+ 'sphinx.ext.graphviz',
+ 'otcdocstheme',
+]
+
+# otcdocstheme options
+otcdocs_repo_name = 'opentelekomcloud-blueprints/docs'
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+copyright = u'2017-2021, OpenTelekomCloud Contributors'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'otcdocs'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+otcdocs_auto_version = False
+
+html_favicon = '_static/favicon.ico'
+
+# To use the API Reference sidebar dropdown menu,
+# uncomment the html_theme_options parameter. The theme
+# variable, sidebar_dropdown, should be set to `api_ref`.
+# Otherwise, the list of links for the User and Ops docs
+# appear in the sidebar dropdown menu.
+html_theme_options = {
+ 'show_other_versions': False,
+ 'sidebar_mode': 'toctree',
+}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+html_css_files = [
+ 'css/default.css'
+]
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'native'
+
+templates_path = ['templates']
+
+graphviz_output_format = 'svg'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ ('index', 'os-doc-blueprints.tex', u'os-doc-blueprints Documentation',
+ u'OpenTelekomCloud Contributors', 'manual'),
+]
diff --git a/doc/source/dot/cce_vault_overview.dot b/doc/source/dot/cce_vault_overview.dot
new file mode 100644
index 0000000..335d37e
--- /dev/null
+++ b/doc/source/dot/cce_vault_overview.dot
@@ -0,0 +1,23 @@
+digraph cce_vault {
+ graph [bgcolor=transparent compound=true fontcolor="#2D3436"
+ fontname="Sans-Serif" fontsize=10 rankdir="RL"]
+ node [fixedsize=false]
+ subgraph cluster_cce {
+ graph [bgcolor="#E5F5FD" shape=box style=rounded label="CCE" rankdir="LR"]
+ zk_svc [label="ZK Service" fixedsize=True fontsize=10 height=1.4
+ image="../_static/images/k8_svc.png" imagescale="true" labelloc=b
+ shape=none width=1 rank="min"]
+ zk_pod1 [label="ZK Pod" fixedsize=True fontsize=10 height=1.4 image="../_static/images/k8_pod.png" imagescale="true" labelloc=b shape=none width=1 rank="same"]
+ zk_pod2 [label="ZK Pod" fixedsize=True fontsize=10 height=1.4 image="../_static/images/k8_pod.png" imagescale="true" labelloc=b shape=none width=1 rank="same"]
+ zk_pod3 [label="ZK Pod" fixedsize=True fontsize=10 height=1.4 image="../_static/images/k8_pod.png" imagescale="true" labelloc=b shape=none width=1 rank="same"]
+ zk_pod1 -> zk_svc
+ zk_pod2 -> zk_svc
+ zk_pod3 -> zk_svc
+ }
+ vault -> zk_pod1
+ vault -> zk_pod2
+ vault -> zk_pod3
+ vault [label="Vault" fixedsize=True fontsize=10 height=1.4 image="../_static/images/vault.png" imagescale="true" labelloc=b shape=none width=1]
+ user [label=Clients fixedsize=true fontsize=10 height=1.4 image="../_static/images/users.png" imagescale=true labelloc=b shape=none width=1]
+ zk_svc -> user [dir=both]
+}
diff --git a/doc/source/index.rst b/doc/source/index.rst
new file mode 100644
index 0000000..32eee9c
--- /dev/null
+++ b/doc/source/index.rst
@@ -0,0 +1,15 @@
+Blueprints
+==========
+
+Users sometimes identify use cases that can be solved in a standardized way to
+save research time and effort. Blueprints are a series of best practices,
+curated by the Open Telekom Cloud engineering and architecture teams. While
+they are not covered directly by the `Service description
+`_, they are tested and
+validated recommendations from our experts.
+
+
+.. toctree::
+ :maxdepth: 1
+
+ cce_vault.rst
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..608098c
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,21 @@
+[metadata]
+name = blueprints
+summary = OpenTelekomCloud Blueprints docs
+author = Artem Goncharov
+author_email = artem.goncharov@gmail.com
+home_page = https://cloud.otc.com
+python_requires = >= 3.6
+classifier =
+ Environment :: OpenStack
+ Environment :: Other Environment
+ Intended Audience :: Information Technology
+ Intended Audience :: System Administrators
+ License :: OSI Approved :: Apache Software License
+ Operating System :: POSIX :: Linux
+ Programming Language :: Python
+ Programming Language :: Python :: 3
+ Programming Language :: Python :: 3.5
+ Programming Language :: Python :: 3.6
+ Programming Language :: Python :: 3.7
+ Programming Language :: Python :: 3.8
+ Topic :: Documentation
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..6345380
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,18 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import setuptools
+
+setuptools.setup(
+ setup_requires=['pbr>=2.0.0'],
+ pbr=True)
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..966a2db
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,26 @@
+[tox]
+minversion = 3.6.0
+envlist = docs
+ignore_basepython_conflict = true
+
+[testenv]
+basepython = python3
+usedevelop = true
+deps =
+ # -c{env:TOX_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/upper-constraints.txt}
+ -r{toxinidir}/doc/requirements.txt
+sitepackages = False
+whitelist_externals =
+ rm
+
+[testenv:venv]
+commands = {posargs}
+
+[testenv:docs]
+deps =
+ -r{toxinidir}/doc/requirements.txt
+ pre-commit
+commands =
+ # pre-commit run -a
+    rm -rf doc/build/html doc/build/doctrees
+ sphinx-build -a -E -W -d doc/build/doctrees -b html doc/source doc/build/html