21 Commits

Author SHA1 Message Date
mr
cbf32fff48 oc-deploy can now deploy Demo 2026-02-06 10:27:42 +01:00
mr
90691a5ec7 update date demo 2026-02-02 14:54:04 +01:00
mr
a4210d08c4 oc-k8s + bootstrap_dht 2026-01-27 16:04:45 +01:00
mr
be080d7511 Demo 2026-01-12 13:35:25 +01:00
mr
cc8d599ce5 deploy demo alpr 2025-11-17 08:08:56 +01:00
mr
a7d1cc3429 argo.sh 2025-11-07 11:31:47 +01:00
mr
934f00d749 deploy 2025-11-07 10:37:11 +01:00
mr
9e1686a78d Start demo improved 2025-11-06 16:43:52 +01:00
mr
140bd63559 deploy adjustment 2025-06-16 09:14:36 +02:00
mr
90cc774341 clone debugged 2025-04-29 10:30:28 +02:00
mr
db10baf460 update 2025-04-28 14:11:18 +02:00
mr
53fca60178 Merge branch 'main' of https://cloud.o-forge.io/core/oc-deploy into main 2025-04-28 09:46:44 +02:00
mr
8b53c2e70e update oc-deploy 2025-04-28 09:45:54 +02:00
pb
3892692a07 corrected grafana data source file 2025-04-08 11:46:26 +02:00
mr
7ec310f161 full 80 2025-04-03 16:30:32 +02:00
mr
ced5e55698 git ignore deployed 2025-04-01 10:14:51 +02:00
mr
7cdb02b677 deployed 2025-04-01 10:13:55 +02:00
mr
82aed0fdb6 add datas 2025-03-27 13:32:27 +01:00
mr
626a1b1f22 oc-deploy vanilla k8s docker 2025-03-27 13:21:52 +01:00
mr
3b7c3a9526 deploy auto traefik 2025-03-06 09:39:07 +01:00
mr
0a96827200 dev launch mode 2025-03-06 09:34:04 +01:00
131 changed files with 2812 additions and 1601 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
k8s/deployed_config

View File

@@ -1,3 +1,34 @@
# RUN DOCKER DEMO
ADD a clean argo
```
./run_argo.sh
```
Verify with `kubectl get pods -n argo -w` -> all servers are running and show 1/1 ready
Any problem at this stage usually comes from your underlying k3s/k8s cluster itself (FIX THAT BEFORE continuing)
```
sudo ./clone_opencloud_microservices.sh demo-alpr
cd ./docker
./start-demo.sh
```
Go to localhost:8000; prefer a Chrome session started with `chromium-browser --disable-web-security` (CORS disabled) so the images load correctly.
Before relaunching, or to stop the stack properly:
```
cd ./docker
./stop.sh
```
If you want a Linux desktop app:
```
cd ../oc-front
./local_run_traefik.sh
```
# Purpose of this component
The purpose of oc-deploy, is to deploy all the OC components over a Kubernetes cluster.

22
datas/add.sh Normal file
View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Re-seed the demo Mongo database running in the "$CONTAINER" container:
# drop the database, copy ./datas into the container, then import every
# *.json file as a collection named after the file (e.g. peer.json -> peer).
DB="DC_myDC"
CONTAINER="mongo"

echo "📌 Dropping database '$DB'..."
docker exec -i "$CONTAINER" mongosh --eval "db.getSiblingDB('$DB').dropDatabase()"

echo "📌 Copying datas/ to container..."
docker cp ./datas "$CONTAINER":/datas

echo "📌 Importing JSON files..."
# Without nullglob the loop would run once with the literal string
# './datas/*.json' when the directory holds no JSON files.
shopt -s nullglob
for i in ./datas/*.json; do
filename=$(basename "$i")
collection="${filename%.json}"
echo "→ Importing '$filename' into collection '$collection'..."
docker exec -i "$CONTAINER" sh -c \
"mongoimport --jsonArray --db $DB --collection $collection --file /datas/$filename --drop"
done
echo "✔ Done!"

View File

@@ -0,0 +1 @@
[{"_id":"0b6a375f-be3e-49a9-9827-3c2d5eddb057","abstractobject":{"id":"0b6a375f-be3e-49a9-9827-3c2d5eddb057","name":"test","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":{"$date":"2025-01-27T10:41:47.741Z"},"update_date":{"$date":"2025-01-27T10:41:47.741Z"},"updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":0},"description":"Proto Collaborative area example","collaborative_area":{},"workflows":["58314c99-c595-4ca2-8b5e-822a6774efed"],"allowed_peers_group":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"workspaces":[]}]

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
[{"_id":"c0cece97-7730-4c2a-8c20-a30944564106","failed_execution":null,"abstractobject":{"update_date":{"$date":"2025-03-27T09:13:13.230Z"},"access_mode":0,"id":"c0cece97-7730-4c2a-8c20-a30944564106","name":"local","is_draft":false,"creation_date":{"$date":"2025-03-27T09:13:13.230Z"}},"url":"http://localhost:8000","wallet_address":"my-wallet","public_key":"-----BEGIN RSA PUBLIC KEY-----\nMIICCgKCAgEAw2pdG6wMtuLcP0+k1LFvIb0DQo/oHW2uNJaEJK74plXqp4ztz2dR\nb+RQHFLeLuqk4i/zc3b4K3fKPXSlwnVPJCwzPrnyT8jYGOZVlWlETiV9xeJhu6s/\nBh6g1PWz75XjjwV50iv/CEiLNBT23f/3J44wrQzygqNQCiQSALdxWLAEl4l5kHSa\n9oMyV70/Uql94/ayMARZsHgp9ZvqQKbkZPw6yzVMfCBxQozlNlo315OHevudhnhp\nDRjN5I7zWmqYt6rbXJJC7Y3Izdvzn7QI88RqjSRST5I/7Kz3ndCqrOnI+OQUE5NT\nREyQebphvQfTDTKlRPXkdyktdK2DH28Zj6ZF3yjQvN35Q4zhOzlq77dO5IhhopI7\nct8dZH1T1nYkvdyCA/EVMtQsASmBOitH0Y0ACoXQK5Kb6nm/TcM/9ZSJUNiEMuy5\ngBZ3YKE9oa4cpTpPXwcA+S/cU7HPNnQAsvD3iJi8GTW9uJs84pn4/WhpQqmXd4rv\nhKWECCN3fHy01fUs/U0PaSj2jDY/kQVeXoikNMzPUjdZd9m816TIBh3v3aVXCH/0\niTHHAxctvDgMRb2fpvRJ/wwnYjFG9RpamVFDMvC9NffuYzWAA9IRIY4cqgerfHrV\nZ2HHiPTDDvDAIsvImXZc/h7mXN6m3RCQ4Qywy993wd9gUdgg/qnynHcCAwEAAQ==\n-----END RSA PUBLIC KEY-----\n","state":1}]

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
[{"_id":"04bc70b5-8d7b-44e6-9015-fadfa0fb102d","abstractinstanciatedresource":{"abstractresource":{"type":"storage","abstractobject":{"id":"04bc70b5-8d7b-44e6-9015-fadfa0fb102d","name":"IRT risk database","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":"2021-09-30T14:00:00.000Z","update_date":"2021-09-30T14:00:00.000Z","updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":1},"logo":"https://cloud.o-forge.io/core/deperecated-oc-catalog/raw/branch/main/scripts/local_imgs/IRT risk database.png","description":"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.","short_description":"S3 compliant IRT file storage","owners":[{"name":"IRT"}]},"instances":[{"env":[{"attr":"source","readonly":true}],"resourceinstance":{"abstractobject":{"id":"7fdccb9c-7090-40a5-bacd-7435bc56c90d","name":"IRT local file storage Marseille"},"location":{"latitude":50.62925,"longitude":3.057256},"country":250,"partnerships":[{"resourcepartnership":{"namespace":"default","peer_groups":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"pricing_profiles":[{"pricing":{"price":50,"currency":"EUR","buying_strategy":0,"time_pricing_strategy":0}}]}}]},"source":"/mnt/vol","local":false,"security_level":"public","size":50,"size_type":3,"redundancy":"RAID5","throughput":"r:200,w:150"}]},"storage_type":5,"acronym":"DC_myDC"},{"_id":"e726020a-b68e-4abc-ab36-c3640ea3f557","abstractinstanciatedresource":{"abstractresource":{"type":"storage","abstractobject":{"id":"e726020a-b68e-4abc-ab36-c3640ea3f557","name":"IRT local file 
storage","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":"2021-09-30T14:00:00.000Z","update_date":"2021-09-30T14:00:00.000Z","updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":1},"logo":"https://cloud.o-forge.io/core/deperecated-oc-catalog/raw/branch/main/scripts/local_imgs/IRT local file storage.png","description":"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.","short_description":"S3 compliant IRT file storage","owners":[{"name":"IRT"}]},"instances":[{"resourceinstance":{"env":[{"attr":"source","readonly":true}],"abstractobject":{"id":"7fdccb9c-7090-40a5-bacd-7435bc56c90d","name":"IRT local file storage Marseille"},"location":{"latitude":50.62925,"longitude":3.057256},"country":250,"partnerships":[{"resourcepartnership":{"namespace":"default","peer_groups":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"pricing_profiles":[{"pricing":{"price":50,"currency":"EUR","buying_strategy":0,"time_pricing_strategy":0}}]}}]},"source":"/mnt/vol","local":true,"security_level":"public","size":500,"size_type":0,"encryption":true,"redundancy":"RAID5S","throughput":"r:300,w:350"}]},"storage_type":5,"acronym":"DC_myDC"}]

File diff suppressed because one or more lines are too long

19
deployed_config Normal file
View File

@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURCVENDQWUyZ0F3SUJBZ0lJT0lhTzFqdnRET0F3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TlRFeE1UTXhNVFU0TVRaYUZ3MHpOVEV4TVRFeE1qQXpNVFphTUJVeApFekFSQmdOVkJBTVRDbXQxWW1WeWJtVjBaWE13Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLCkFvSUJBUUNsVGxkUEozTkpON0wyblVtbDFLM3pNL21PV3VLN0FKZzBjNktJY01nZFhoaEF3Z0FPRzFuUnZhRG8KL3N3ODBweUFjbEJSbzg2bnlyM1d6UUVYa1hTTDY2bFV6LzJzaHh5QlliejJXTDlZeUZGVmxwSzlPY3BRQjVIegpURUNrNStIY28rK1NJVndXUHc0dCtQZXhsb2VpaHZhUUJvUE54d2lxWjhhWG50NUljd0lXU1ZqMVVsT1p1NmhwCnA3VUVuS0dhTWl3Zm5Zb2o4MmNvUVFEdFlEWi9MQS80L3V1UzVlUFZKaTBpb1dXMGduTWdzUm1IUzltY3cvZzkKK1hYVm5vN1lLekYvRjEyeTZPQ0YrOUpGd2JqWmFiVlJhc21rQjZyTFZ1N2FsMi9TQ3VyaitEWk5Mcys5WHVTYgpFb2I2UE8rQlhlNmJDdGp5aWZvZmJ2TExXREc5QWdNQkFBR2pXVEJYTUE0R0ExVWREd0VCL3dRRUF3SUNwREFQCkJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJTd2oyczRpUG9rV0FnSFlNQ1czZ2NxMEEzNlZ6QVYKQmdOVkhSRUVEakFNZ2dwcmRXSmxjbTVsZEdWek1BMEdDU3FHU0liM0RRRUJDd1VBQTRJQkFRQWc3ZW9BTWRlZgpwN21IYVFnR1F2YnRQRHVQY2REa2J1NjVKWDI2ZzhNMy9WMlovaEp4MlpqVE0wdTZmNExuOUFnc1p0R3dhL1RRClp0aGRpQWdWdDRuNjZBZ1lLQS8yYlNBbVNWZ1R0cngyd29xN2VzbnJjc1VUcm5ISXptVS9TR01CVzNtTlZnd0sKWnpLN3ZuTm9jaHAzYzNDa0xmbWFXeWpMUjljVXVWejB0T3psa0p5SDB6OUNrQVVXdmVJZ3VTR2Y2WWtManRPZQpvdld3ZHJUcTlLaGQ2SEVXa0lIM241QjNDbTF0aXE0bGFuaVJERkhheWk1enRBSDBYME1UOUhaNGszR0ErdXA4CkZJZXdubDJXTmJoVGxraXdhMzRRTUhDelhpUXdGTThjODUwTnJGNXFOSEVTbUMzeWtGdjk3VlNqOW8wb3pPS3YKSWlERkRVSTZybG0rCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
server: https://127.0.0.1:36289
name: kind-opencloud
contexts:
- context:
cluster: kind-opencloud
user: kind-opencloud
name: kind-opencloud
current-context: kind-opencloud
kind: Config
users:
- name: kind-opencloud
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURLVENDQWhHZ0F3SUJBZ0lJVnpzcEVlN3Z2eFF3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB5TlRFeE1UTXhNVFU0TVRaYUZ3MHlOakV4TVRNeE1qQXpNVFphTUR3eApIekFkQmdOVkJBb1RGbXQxWW1WaFpHMDZZMngxYzNSbGNpMWhaRzFwYm5NeEdUQVhCZ05WQkFNVEVHdDFZbVZ5CmJtVjBaWE10WVdSdGFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFEV2s3YlkKYWVYQjJGaUczSjc1Zk5iaGkxQkhkTTdrNk8yQ0p1WkJydzE0UldSNWZ2YnZUNHVBRm1LZ0VFWHk0angvaWdwOQpFUm9QUmFxUFoxQSs0N21HRUl0bjdSdFFuY0k2N3FMSUxnUUU4ZkhWTE9GU0hVRmV0S05tbGxZNEErWDVRejZmCjBHdExBZzNoMlc4bmtibGtzakNVQjR3SEF5ZnMrM1dtZmJRb0YzcmU5NUlKMDZCY2NtOTgyZTFVUUpsZ1YzaW4KN0pQdlRDYmp0bkR1UmV4VXpyazJsK1JHWjVHYitaZEs3Z1QvS2MvdFhONjVIYTRiTHc2aFR4RzdxZlB5dnlSdAphblVYcHQ5SVNSd3BPc2R6YjF1RkZTYUN6V1FBWUNJc3RpeWs1bkszVWJwL1ZLS2trTFlub2NVbjdKNUtOdDJFCjhTcTZ1N2RjRWNqZFBaUWhBZ01CQUFHalZqQlVNQTRHQTFVZER3RUIvd1FFQXdJRm9EQVRCZ05WSFNVRUREQUsKQmdnckJnRUZCUWNEQWpBTUJnTlZIUk1CQWY4RUFqQUFNQjhHQTFVZEl3UVlNQmFBRkxDUGF6aUkraVJZQ0FkZwp3SmJlQnlyUURmcFhNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUJjSUEyeE9rYXk4aTlLS3pRWUh5bmM5a2xyCmU4ZEN1aHpEUWhGbVhYK3pmeTk3dnRaSnlEWFp4TlN0MlRoS24xeE5KckNKVkxPWTAxV0FDU2JNZm5wRGdxVjgKUWZjRXVFUHdYSithMUV0ZmpsajZPTU41Q0RvYWJnRUllSkhxVkhrZkJzdE5icXFrTEppUThvZmh2VDc4TE1Bcwp2emJNTnd5L0ZXOVBVK0YvUGJkOEdEZkVPWHU3UFJzbmV5Q0JHVXhoNThOM2lmNFhnOXh6L3hwM2EvNE1hK28vClc2RklOUUNkNjVzcVFSWEx1U3VpRjlTTG9peUtYdmJUb1UxNU9YZTNOWFNWTjNOdUdRWmlZWDU4OTFyZGtpZFIKL1NuN3VTTzJDWXNPK3l4QWlqbUZhQmZIeWpNUlZKak51WnpSbStwTDdoODFmNFQ5dDJ1MWpQeVpPbGRiCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcFFJQkFBS0NBUUVBMXBPMjJHbmx3ZGhZaHR5ZStYelc0WXRRUjNUTzVPanRnaWJtUWE4TmVFVmtlWDcyCjcwK0xnQlppb0JCRjh1SThmNG9LZlJFYUQwV3FqMmRRUHVPNWhoQ0xaKzBiVUozQ091Nml5QzRFQlBIeDFTemgKVWgxQlhyU2pacFpXT0FQbCtVTStuOUJyU3dJTjRkbHZKNUc1WkxJd2xBZU1Cd01uN1B0MXBuMjBLQmQ2M3ZlUwpDZE9nWEhKdmZObnRWRUNaWUZkNHAreVQ3MHdtNDdadzdrWHNWTTY1TnBma1JtZVJtL21YU3U0RS95blA3VnplCnVSMnVHeThPb1U4UnU2bno4cjhrYldwMUY2YmZTRWtjS1RySGMyOWJoUlVtZ3Mxa0FHQWlMTFlzcE9aeXQxRzYKZjFTaXBKQzJKNkhGSit5ZVNqYmRoUEVxdXJ1M1hCSEkzVDJVSVFJREFRQUJBb0lCQUFkSElhWldkU29DMEt5RwpVc2dMTEJpZ245dVo3U013enFRaG9LRllDQ0RDV0hBOGdRWHpPenZnRzlQcVZBMElaUWZvWW9jRkZrNnY5Mk1xCkhHWjlxbjhQRkVOZjlKTmlrOElVUjZZbGdCSm1NdzhldzJldkZxd0QwWFQ3SXJmVXFLOWJKZ1p5b2I1U0RBUW8KaFU5MkdhL1RmQTFSUjR1OHJOVXFDWlFEamN3OFFSQTQ4SDBzOTJkU252QkN1SmJrQ0VIYXVtQTYwQVlsNHNMOApzS0o0NytFc29WTWZhK1dCOCsybnRYMHFqQlhLM1Yvc1UyZWJTN0tYTGZhVkg5Z21oU01LMFd2dG9peDRRQzlEClViV3RaTCtoSGN6WWxmcjZaYWQxeEFsaHRXYnNDS3p3ZWdjOGxQbVBqSUJmMUU0bjRDQW1OMmJ5R00wUlRrT1QKWHdvdWdEOENnWUVBNGVISWQ0Zy9FV3k0dmx0NGxUN2FnOFJKaVhxMHBOQXVDdTRBL25tVWpnTVVVcFFPbmd6cAora3d6ZjFNSUJnVGR0ejlHNU1zNmd6UHgxaTlYblVOZ1hEUlRuWTRSZkZ3Q256NXVNcW5LZDd3Njhwem9acUxGCjJpSVZ6SmtGUmVoaTNVbXVLWnJmRnJKekYrOFArMGtmZmpjNjcvZkF1c2pnellFbWl5dGxmQnNDZ1lFQTh6QU0KdUh3VG1WMC9aRFlyUUYxLzA2R1ZqbzRtT2Z4S0loUVBxdDZleVNuWElySGlBbUVnTjFOWTc1UHRtMGVVdXF0bApDanU4dmp4aHd0UUF6NnUwNEptTldpTzdZL0ZiYlVKNnAxcHJYaURVWXYvUkRwK3FHa1I1SExsd0gvWENrYzIxCnpnREhJMlVXMzZCUk4wMFhydjdGWThxaHNqU0dlZU1Gei9pNXZITUNnWUVBcDhJSklZVmwyYW9XZHdIMlIxbWIKN2xxOGhzZEVIRmVrcW1kakE1d0dVWVpGOUtLVFRKeW90VVVjeGdaRG9qekE4ZFNqOFU1aVVZa2xwZjRaSXVvawpTYlp2RjBlcEF1Uk82amZ5bmR2dVRBalcrdEsvNDJJbWNULzVVcStlOC9HSVkzTFNUNEgvQjV0VzBVS3lhdDArCjczMVRYMTl3bXdpUHRQQ2pVSjdWUzFzQ2dZRUF3NWthSWlocCt5aXRIQVVWdEtkL2NOQytZZktqZkhBWGtHRmkKV0tUR1FqYU0rekxuL2RIdy80N2lNWkJoeEV0R3JQMitQd1RkUW9WK2ZCM1lxVEFLUTd3OW5RcXdaaXB5eHVaNQprTEdCT2l4ZHAyTHEyMEJBcU8vNkdjaHREc2UwdjJFZG9adXVrQ0YyekZjOSs2VGVMN3ByT1dCNXZjUFJoYWU3CnZSTHBFVkVDZ1lFQXAyREYyWlJFRlZmZ3Z
Gc2dtdHRwVjFMemd2Qi9Fb0lFMTFpMmFPelZGTzRLb3pabWpHVlAKaTB6T3VlaVBsZmNCYU5ZanRIbkxrRUpYeGVwYm9sSnlvQUdWV0o2b2grcFhON2I5TURJVUhVV0E3ZFArc1NlMwpvS29adS9TVGdJa1VQb2xwa2lJNjJrRXBFdXE4bjRYOFVhUWV5M1E4c1VWNHpYM0dSa3d2QkFZPQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=

9
docker/db/add.sh Executable file
View File

@@ -0,0 +1,9 @@
#!/bin/bash
# Import every *.json file in ./datas into the DC_myDC Mongo database,
# one collection per file (collection name = file name minus ".json").
docker cp ./datas mongo:.
# Glob instead of parsing `ls` output (robust against spaces, and only
# picks up JSON files); nullglob skips the loop when the dir is empty.
shopt -s nullglob
for path in ./datas/*.json; do
file=$(basename "$path")
collection="${file%.json}"
echo "ADD file $file in collection $collection"
# -i (no -t): a TTY is not available when this script runs non-interactively.
docker exec -i mongo sh -c "mongoimport --jsonArray --db DC_myDC --collection $collection --file ./datas/$file"
done

View File

@@ -0,0 +1 @@
[{"_id":"0b6a375f-be3e-49a9-9827-3c2d5eddb057","abstractobject":{"id":"0b6a375f-be3e-49a9-9827-3c2d5eddb057","name":"test","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":{"$date":"2025-01-27T10:41:47.741Z"},"update_date":{"$date":"2025-01-27T10:41:47.741Z"},"updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":0},"description":"Proto Collaborative area example","collaborative_area":{},"workflows":["58314c99-c595-4ca2-8b5e-822a6774efed"],"allowed_peers_group":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"workspaces":[]}]

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
[{"_id":"c0cece97-7730-4c2a-8c20-a30944564106","failed_execution":null,"abstractobject":{"update_date":{"$date":"2025-03-27T09:13:13.230Z"},"access_mode":0,"id":"c0cece97-7730-4c2a-8c20-a30944564106","name":"local","is_draft":false,"creation_date":{"$date":"2025-03-27T09:13:13.230Z"}},"url":"http://localhost:8000","wallet_address":"my-wallet","public_key":"-----BEGIN RSA PUBLIC KEY-----\nMIICCgKCAgEAw2pdG6wMtuLcP0+k1LFvIb0DQo/oHW2uNJaEJK74plXqp4ztz2dR\nb+RQHFLeLuqk4i/zc3b4K3fKPXSlwnVPJCwzPrnyT8jYGOZVlWlETiV9xeJhu6s/\nBh6g1PWz75XjjwV50iv/CEiLNBT23f/3J44wrQzygqNQCiQSALdxWLAEl4l5kHSa\n9oMyV70/Uql94/ayMARZsHgp9ZvqQKbkZPw6yzVMfCBxQozlNlo315OHevudhnhp\nDRjN5I7zWmqYt6rbXJJC7Y3Izdvzn7QI88RqjSRST5I/7Kz3ndCqrOnI+OQUE5NT\nREyQebphvQfTDTKlRPXkdyktdK2DH28Zj6ZF3yjQvN35Q4zhOzlq77dO5IhhopI7\nct8dZH1T1nYkvdyCA/EVMtQsASmBOitH0Y0ACoXQK5Kb6nm/TcM/9ZSJUNiEMuy5\ngBZ3YKE9oa4cpTpPXwcA+S/cU7HPNnQAsvD3iJi8GTW9uJs84pn4/WhpQqmXd4rv\nhKWECCN3fHy01fUs/U0PaSj2jDY/kQVeXoikNMzPUjdZd9m816TIBh3v3aVXCH/0\niTHHAxctvDgMRb2fpvRJ/wwnYjFG9RpamVFDMvC9NffuYzWAA9IRIY4cqgerfHrV\nZ2HHiPTDDvDAIsvImXZc/h7mXN6m3RCQ4Qywy993wd9gUdgg/qnynHcCAwEAAQ==\n-----END RSA PUBLIC KEY-----\n","state":1}]

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
[{"_id":"04bc70b5-8d7b-44e6-9015-fadfa0fb102d","abstractinstanciatedresource":{"abstractresource":{"type":"storage","abstractobject":{"id":"04bc70b5-8d7b-44e6-9015-fadfa0fb102d","name":"IRT risk database","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":"2021-09-30T14:00:00.000Z","update_date":"2021-09-30T14:00:00.000Z","updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":1},"logo":"https://cloud.o-forge.io/core/deperecated-oc-catalog/raw/branch/main/scripts/local_imgs/IRT risk database.png","description":"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.","short_description":"S3 compliant IRT file storage","owners":[{"name":"IRT"}]},"instances":[{"env":[{"attr":"source","readonly":true}],"resourceinstance":{"abstractobject":{"id":"7fdccb9c-7090-40a5-bacd-7435bc56c90d","name":"IRT local file storage Marseille"},"location":{"latitude":50.62925,"longitude":3.057256},"country":250,"partnerships":[{"resourcepartnership":{"namespace":"default","peer_groups":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"pricing_profiles":[{"pricing":{"price":50,"currency":"EUR","buying_strategy":0,"time_pricing_strategy":0}}]}}]},"source":"/mnt/vol","local":false,"security_level":"public","size":50,"size_type":3,"redundancy":"RAID5","throughput":"r:200,w:150"}]},"storage_type":5,"acronym":"DC_myDC"},{"_id":"e726020a-b68e-4abc-ab36-c3640ea3f557","abstractinstanciatedresource":{"abstractresource":{"type":"storage","abstractobject":{"id":"e726020a-b68e-4abc-ab36-c3640ea3f557","name":"IRT local file 
storage","is_draft":false,"creator_id":"c0cece97-7730-4c2a-8c20-a30944564106","creation_date":"2021-09-30T14:00:00.000Z","update_date":"2021-09-30T14:00:00.000Z","updater_id":"c0cece97-7730-4c2a-8c20-a30944564106","access_mode":1},"logo":"https://cloud.o-forge.io/core/deperecated-oc-catalog/raw/branch/main/scripts/local_imgs/IRT local file storage.png","description":"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.","short_description":"S3 compliant IRT file storage","owners":[{"name":"IRT"}]},"instances":[{"resourceinstance":{"env":[{"attr":"source","readonly":true}],"abstractobject":{"id":"7fdccb9c-7090-40a5-bacd-7435bc56c90d","name":"IRT local file storage Marseille"},"location":{"latitude":50.62925,"longitude":3.057256},"country":250,"partnerships":[{"resourcepartnership":{"namespace":"default","peer_groups":{"c0cece97-7730-4c2a-8c20-a30944564106":["*"]},"pricing_profiles":[{"pricing":{"price":50,"currency":"EUR","buying_strategy":0,"time_pricing_strategy":0}}]}}]},"source":"/mnt/vol","local":true,"security_level":"public","size":500,"size_type":0,"encryption":true,"redundancy":"RAID5S","throughput":"r:300,w:350"}]},"storage_type":5,"acronym":"DC_myDC"}]

File diff suppressed because one or more lines are too long

View File

@@ -1,88 +0,0 @@
version: '3.8'
services:
traefik:
image: traefik:latest
command:
- "--api.insecure=true"
- "--providers.docker=true"
- "--entrypoints.web.address=:80"
ports:
- "80:80"
- "8080:8080"
volumes:
- /var/run/docker.sock:/var/run/docker.sock
mongo:
image: mongo:latest
ports:
- "27017:27017"
volumes:
- mongo-data:/data/db
labels:
- "traefik.enable=true"
- "traefik.http.routers.mongo.rule=PathPrefix(`/mongo`)"
- "traefik.http.services.mongo.loadbalancer.server.port=27017"
nats:
image: nats:latest
ports:
- "4222:4222"
labels:
- "traefik.enable=true"
- "traefik.http.routers.nats.rule=PathPrefix(`/nats`)"
- "traefik.http.services.nats.loadbalancer.server.port=4222"
zinc:
image: public.ecr.aws/zinclabs/zincsearch:latest
ports:
- "4080:4080"
labels:
- "traefik.enable=true"
- "traefik.http.routers.zinc.rule=PathPrefix(`/zinc`)"
- "traefik.http.services.zinc.loadbalancer.server.port=4080"
dex:
image: quay.io/dexidp/dex:latest
ports:
- "5556:5556"
volumes:
- ./dex/config.yaml:/etc/dex/cfg/config.yaml
command: ["dex", "serve", "/etc/dex/cfg/config.yaml"]
labels:
- "traefik.enable=true"
- "traefik.http.routers.dex.rule=PathPrefix(`/dex`)"
- "traefik.http.services.dex.loadbalancer.server.port=5556"
ldap:
image: bitnami/openldap
ports:
- "389:389"
environment:
- LDAP_ADMIN_USERNAME=admin
- LDAP_ADMIN_PASSWORD=adminpassword
- LDAP_USERS=user01,user02
- LDAP_PASSWORDS=password1,password2
grafana:
image: grafana/grafana:latest
ports:
- "3000:3000"
environment:
GF_SECURITY_ADMIN_PASSWORD: "admin"
labels:
- "traefik.enable=true"
- "traefik.http.routers.grafana.rule=PathPrefix(`/grafana`)"
- "traefik.http.services.grafana.loadbalancer.server.port=3000"
loki:
image: grafana/loki:latest
ports:
- "3100:3100"
labels:
- "traefik.enable=true"
- "traefik.http.routers.loki.rule=PathPrefix(`/loki`)"
- "traefik.http.services.loki.loadbalancer.server.port=3100"
volumes:
mongo-data:

48
docker/start-demo.sh Executable file
View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Start the full OpenCloud demo stack with Docker: read the kube connection
# details from the local kubeconfig, (re)start the tool containers, seed the
# demo database, then build and run each OC microservice image with the kube
# credentials baked in as build args.

# Kubernetes connection details from the current kubeconfig / host network.
host=$(ip -4 addr show "$(ip route | awk '/default/ {print $5}')" | awk '/inet / {print $2}' | cut -d/ -f1)
port=6443
ca=$(kubectl config view --raw --minify -o jsonpath='{.clusters[0].cluster.certificate-authority-data}')
cert=$(kubectl config view --raw --minify -o jsonpath='{.users[0].user.client-certificate-data}')
key=$(kubectl config view --raw --minify -o jsonpath='{.users[0].user.client-key-data}')

# Front entrypoint URL; overridable via the SECOND positional argument.
# NOTE(review): $2 (not $1) looks intentional but is surprising — confirm callers.
HOST=${2:-"http://localhost:8000"}

docker network create oc || true   # ignore "network already exists"
docker compose down
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d
docker compose -f ./docker-compose.traefik.yml up --force-recreate -d && cd ..
cd ./db && ./add.sh && cd ..
cd ../..

REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
"oc-front"
)
for repo in "${REPOS[@]}"
do
echo "Building $repo"
# `|| true` (was `| true`): ignore failures for not-running/absent containers.
docker kill "$repo" || true
docker rm "$repo" || true
cd "./$repo"
docker build . -t "$repo" --build-arg=HOST="$HOST" --build-arg=KUBERNETES_SERVICE_HOST="$host" \
--build-arg=KUBERNETES_SERVICE_PORT="$port" --build-arg=KUBE_CA="$ca" --build-arg=KUBE_CERT="$cert" \
--build-arg=KUBE_DATA="$key" && docker compose up -d
cd ..
done
cd ./oc-deploy/docker/tools && docker compose -f ./docker-compose.dev.yml up hydra-client --force-recreate -d

50
docker/start.sh Executable file
View File

@@ -0,0 +1,50 @@
#!/bin/bash
# Start every OC microservice via each repo's `make run-docker`, writing the
# Kubernetes connection details into each repo's ./env.env first.

# Kubernetes connection details from the current kubeconfig / host network.
host=$(ip -4 addr show "$(ip route | awk '/default/ {print $5}')" | awk '/inet / {print $2}' | cut -d/ -f1)
port=6443
ca=$(kubectl config view --raw --minify -o jsonpath='{.clusters[0].cluster.certificate-authority-data}')
cert=$(kubectl config view --raw --minify -o jsonpath='{.users[0].user.client-certificate-data}')
key=$(kubectl config view --raw --minify -o jsonpath='{.users[0].user.client-key-data}')

export HOST=${HOST:-"http://localhost:8000"}

docker network create oc || true   # ignore "network already exists"
docker compose down
cd ./tools && docker compose -f ./docker-compose.dev.yml up --force-recreate -d
docker compose -f ./docker-compose.traefik.yml up --force-recreate -d && cd ..
cd ../..

REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
"oc-front"
)
for repo in "${REPOS[@]}"
do
echo "Building $repo"
# `|| true` (was `| true`): ignore failures for not-running/absent containers.
docker kill "$repo" || true
docker rm "$repo" || true
cd "./$repo"
# BUG FIX: was `$hostdocker`, an undefined variable that expanded to the
# empty string; the intended value is `$host` (the host's default-route IP).
cat > ./env.env <<EOF
KUBERNETES_SERVICE_HOST=$host
KUBERNETES_SERVICE_PORT=$port
KUBE_CA="$ca"
KUBE_CERT="$cert"
KUBE_DATA="$key"
EOF
make run-docker
cd ..
done
cd ./oc-deploy/docker/tools && docker compose -f ./docker-compose.dev.yml up hydra-client --force-recreate -d

50
docker/stop.sh Executable file
View File

@@ -0,0 +1,50 @@
#!/bin/bash
# Tear down the demo stack: stop traefik, kill the tool containers, then
# kill/purge every OC microservice, and finally remove volumes and network.
docker compose -f ./tools/docker-compose.traefik.yml down
TOOLS=(
"mongo"
"mongo-express"
"nats"
"loki"
"grafana"
"hydra-client"
"hydra"
"keto"
"ldap"
)
for tool in "${TOOLS[@]}"
do
echo "kill $tool"
# `|| true` (was `| true`): ignore failures for not-running/absent containers.
docker kill "$tool" || true
docker rm "$tool" || true
done
docker volume rm tools_oc-data || true
cd ../..
REPOS=(
"oc-auth"
"oc-catalog"
"oc-datacenter"
"oc-monitord"
"oc-peer"
"oc-shared"
"oc-scheduler"
"oc-schedulerd"
"oc-workflow"
"oc-workspace"
"oc-front"
)
for repo in "${REPOS[@]}"
do
echo "Kill $repo"
cd "./$repo"
docker kill "$repo" || true
docker rm "$repo" || true
make purge || true
cd ..
done
# BUG FIX: was `docker network delete oc` (not a valid Docker subcommand —
# the command is `docker network rm`) and it ran FIRST, while containers were
# still attached, so the network could never actually be removed. Remove it
# last, once nothing is attached.
docker network rm oc || true

View File

@@ -0,0 +1,8 @@
# Grafana datasource provisioning file: registers Loki as the default source.
# apiVersion: 1 is the documented header for provisioning files — added here
# (the scraped original lacked it) per Grafana's provisioning docs.
apiVersion: 1
datasources:
  - name: Loki
    type: loki
    access: proxy          # Grafana proxies queries server-side
    url: http://loki:3100  # Loki service name on the shared `oc` network
    isDefault: true
    jsonData:
      httpMethod: POST

View File

@@ -0,0 +1,163 @@
# Dev tool stack for the OpenCloud demo: Mongo (+express UI), NATS, Loki,
# Grafana, Ory Hydra (+one-shot client registration), LDAP and Ory Keto.
# All services join the pre-created external `oc` network.
# NOTE(review): credentials below are plaintext dev-only values — do not reuse
# this file outside local demos.
version: '3.4'
services:
  mongo:
    image: 'mongo:latest'
    networks:
      - oc
    ports:
      - 27017:27017
    container_name: mongo
    volumes:
      # NOTE(review): the SAME volume backs both /data/db and /data/configdb —
      # confirm this is intentional rather than two separate volumes.
      - oc-data:/data/db
      - oc-data:/data/configdb
  mongo-express:
    image: "mongo-express:latest"
    restart: always
    depends_on:
      - mongo
    networks:
      - oc
    ports:
      - 8081:8081
    environment:
      - ME_CONFIG_BASICAUTH_USERNAME=test
      - ME_CONFIG_BASICAUTH_PASSWORD=test
  nats:
    image: 'nats:latest'
    container_name: nats
    ports:
      - 4222:4222
    command:
      - "--debug"
    networks:
      - oc
  loki:
    image: 'grafana/loki'
    container_name: loki
    labels:
      # Exposed through traefik at /tools/loki, behind oc-auth forward-auth.
      - "traefik.enable=true"
      - "traefik.http.routers.loki.entrypoints=web"
      - "traefik.http.routers.loki.rule=PathPrefix(`/tools/loki`)"
      - "traefik.http.services.loki.loadbalancer.server.port=3100"
      - "traefik.http.middlewares.loki-stripprefix.stripprefix.prefixes=/tools/loki"
      - "traefik.http.routers.loki.middlewares=loki-stripprefix"
      - "traefik.http.middlewares.loki.forwardauth.address=http://oc-auth:8080/oc/forward"
    ports:
      - "3100:3100"
    networks:
      - oc
  grafana:
    image: 'grafana/grafana'
    container_name: grafana
    ports:
      - '3000:3000'
    labels:
      # Exposed through traefik at /tools/grafana, behind oc-auth forward-auth.
      - "traefik.enable=true"
      - "traefik.http.routers.grafana.entrypoints=web"
      - "traefik.http.routers.grafana.rule=PathPrefix(`/tools/grafana`)"
      - "traefik.http.services.grafana.loadbalancer.server.port=3000"
      - "traefik.http.middlewares.grafana-stripprefix.stripprefix.prefixes=/tools/grafana"
      - "traefik.http.routers.grafana.middlewares=grafana-stripprefix"
      - "traefik.http.middlewares.grafana.forwardauth.address=http://oc-auth:8080/oc/forward"
    networks:
      - oc
    volumes:
      - ./conf/grafana_data_source.yml:/etc/grafana/provisioning/datasources/datasource.yml
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=pfnirt # Change this to anything but admin to not have a password change page at startup
      - GF_SECURITY_ADMIN_USER=admin
      - GF_SECURITY_DISABLE_INITIAL_ADMIN_PASSWORD_CHANGE=true
  hydra:
    container_name: hydra
    image: oryd/hydra:v2.2.0
    environment:
      SECRETS_SYSTEM: oc-auth-got-secret
      LOG_LEAK_SENSITIVE_VALUES: true
      # OAUTH2_TOKEN_HOOK_URL: http://oc-auth:8080/oc/claims
      URLS_SELF_ISSUER: http://hydra:4444
      URLS_SELF_PUBLIC: http://hydra:4444
      WEBFINGER_OIDC_DISCOVERY_SUPPORTED_SCOPES: profile,email,phone,roles
      WEBFINGER_OIDC_DISCOVERY_SUPPORTED_CLAIMS: name,family_name,given_name,nickname,email,phone_number
      DSN: memory
    command: serve all --dev
    networks:
      - oc
    ports:
      - "4444:4444"
      - "4445:4445"
    deploy:
      restart_policy:
        condition: on-failure
  ldap:
    image: pgarrett/ldap-alpine
    container_name: ldap
    volumes:
      - "./ldap.ldif:/ldif/ldap.ldif"
    networks:
      - oc
    ports:
      - "390:389"  # host 390 to avoid clashing with a local LDAP on 389
    deploy:
      restart_policy:
        condition: on-failure
  keto:
    image: oryd/keto:v0.7.0-alpha.1-sqlite
    ports:
      - "4466:4466"
      - "4467:4467"
    command: serve -c /home/ory/keto.yml
    restart: on-failure
    volumes:
      - type: bind
        source: .
        target: /home/ory
    container_name: keto
    networks:
      - oc
  hydra-client:
    # One-shot job: registers the demo OAuth2 client against hydra's admin API.
    image: oryd/hydra:v2.2.0
    container_name: hydra-client
    environment:
      HYDRA_ADMIN_URL: http://hydra:4445
      ORY_SDK_URL: http://hydra:4445
    command:
      - create
      - oauth2-client
      - --skip-tls-verify
      - --name
      - test-client
      - --secret
      - oc-auth-got-secret
      - --response-type
      - id_token,token,code
      - --grant-type
      - implicit,refresh_token,authorization_code,client_credentials
      - --scope
      - openid,profile,email,roles
      - --token-endpoint-auth-method
      - client_secret_post
      - --redirect-uri
      - http://localhost:3000
    networks:
      - oc
    deploy:
      restart_policy:
        condition: none  # run once; do not restart after the client is created
    depends_on:
      - hydra
    healthcheck:
      test: ["CMD", "curl", "-f", "http://hydra:4445"]
      interval: 10s
      timeout: 10s
      retries: 10
volumes:
  oc-data:
networks:
  oc:
    external: true

View File

@@ -0,0 +1,24 @@
# Traefik reverse proxy for the demo stack: single web entrypoint on :8000,
# routing to containers on the shared external `oc` network via Docker labels.
version: '3.4'
services:
  traefik:
    image: traefik:v2.10.4
    container_name: traefik
    restart: unless-stopped
    networks:
      - oc
    command:
      - "--api.insecure=true"   # dashboard without auth — dev only
      - "--providers.docker=true"
      - "--entrypoints.web.address=:8000"
    ports:
      - "8000:8000" # Expose Traefik on port 8000
    volumes:
      # Lets traefik discover containers; grants broad Docker control — dev only.
      - /var/run/docker.sock:/var/run/docker.sock
volumes:
  oc-data:
networks:
  oc:
    external: true

18
docker/tools/keto.yml Normal file
View File

@@ -0,0 +1,18 @@
# Ory Keto configuration: one permission namespace, in-memory storage,
# read API on :4466 and write API on :4467.
version: v0.6.0-alpha.1
log:
  level: debug
namespaces:
  - id: 0
    name: open-cloud
dsn: memory  # ephemeral — all relation tuples are lost on restart
serve:
  read:
    host: 0.0.0.0
    port: 4466
  write:
    host: 0.0.0.0
    port: 4467

24
docker/tools/ldap.ldif Normal file
View File

@@ -0,0 +1,24 @@
# Seed entries for the demo LDAP server: an admin user, an AppRoles tree,
# and a "traveler" role that the admin is a member of.
# LDIF requires a blank line between entries — the separators were missing
# in the rendered original, which makes the file parse as one broken entry.
dn: uid=admin,ou=Users,dc=example,dc=com
objectClass: inetOrgPerson
cn: Admin
sn: Istrator
uid: admin
userPassword: admin
mail: admin@example.com
ou: Users

dn: ou=AppRoles,dc=example,dc=com
objectClass: organizationalunit
ou: AppRoles
description: AppRoles

dn: ou=App1,ou=AppRoles,dc=example,dc=com
objectClass: organizationalunit
ou: App1
description: App1

dn: cn=traveler,ou=App1,ou=AppRoles,dc=example,dc=com
objectClass: groupofnames
cn: traveler
description: traveler
member: uid=admin,ou=Users,dc=example,dc=com

5
env.env Normal file
View File

@@ -0,0 +1,5 @@
KUBERNETES_SERVICE_HOST=127.0.0.1
KUBERNETES_SERVICE_PORT=6443
KUBE_CA=""
KUBE_CERT=""
KUBE_DATA=""

0
envs/cluster-1.env Normal file
View File

0
envs/cluster-2.env Normal file
View File

View File

@@ -1,5 +0,0 @@
apiVersion: v2
name: oc-catalog
description: A Helm chart for deploying the oc-catalog application
version: 0.1.0
appVersion: "1.0"

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-oc-catalog
spec:
selector:
app: {{ .Chart.Name }}
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
type: {{ .Values.service.type }}

View File

@@ -1,33 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-oc-catalog
labels:
app: oc-catalog
spec:
serviceName: "{{ .Release.Name }}-oc-catalog"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: oc-catalog
template:
metadata:
labels:
app: oc-catalog
spec:
containers:
- name: oc-catalog
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: 8080
env:
- name: MONGO_DATABASE
value: "DC_myDC"
- name: MONGO_URI
value: "mongodb://mongo:27017"
imagePullSecrets:
{{- if .Values.imagePullSecrets }}
{{- range .Values.imagePullSecrets }}
- name: {{ .name }}
{{- end }}
{{- end }}

View File

@@ -1,19 +0,0 @@
replicaCount: 1
image:
repository: registry.dev.svc.cluster.local:5000/oc-catalog
tag: latest
pullPolicy: IfNotPresent
service:
type: NodePort
port: 8087
targetPort: 8080
mongo:
database: DC_myDC
uri: mongodb://mongo:27017
imagePullSecrets:
- name: regcred

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,12 +0,0 @@
dependencies:
- name: oc-mongo
repository: file://../oc-mongo
version: 0.1.0
- name: oc-mongo-express
repository: file://../oc-mongo-express
version: 0.1.0
- name: oc-catalog
repository: file://../oc-catalog
version: 0.1.0
digest: sha256:036af8acf7fe0a73f039776d13f63aeb7530e7a8b0febb49fd5e8415ac6672c6
generated: "2024-08-27T14:34:41.6038407+02:00"

View File

@@ -1,14 +0,0 @@
apiVersion: v2
name: oc-deploy
description: A Helm chart to deploy oc-mongo, oc-mongo-express, and oc-catalog together
version: 0.1.0
dependencies:
- name: oc-mongo
version: 0.1.0
repository: "file://../oc-mongo"
- name: oc-mongo-express
version: 0.1.0
repository: "file://../oc-mongo-express"
- name: oc-catalog
version: 0.1.0
repository: "file://../oc-catalog"

View File

@@ -1,5 +0,0 @@
apiVersion: v2
name: oc-catalog
description: A Helm chart for deploying the oc-catalog application
version: 0.1.0
appVersion: "1.0"

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: oc-catalog
spec:
selector:
app: {{ .Chart.Name }}
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
type: {{ .Values.service.type }}

View File

@@ -1,33 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-oc-catalog
labels:
app: oc-catalog
spec:
serviceName: "oc-catalog"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: oc-catalog
template:
metadata:
labels:
app: oc-catalog
spec:
containers:
- name: oc-catalog
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: 8080
env:
- name: MONGO_DATABASE
value: "DC_myDC"
- name: MONGO_URI
value: "mongodb://{{ .Release.Name }}-mongo:27017"
imagePullSecrets:
{{- if .Values.imagePullSecrets }}
{{- range .Values.imagePullSecrets }}
- name: {{ .name }}
{{- end }}
{{- end }}

View File

@@ -1,19 +0,0 @@
replicaCount: 1
image:
repository: registry.dev.svc.cluster.local:5000/oc-catalog
tag: latest
pullPolicy: IfNotPresent
service:
type: NodePort
port: 8087
targetPort: 8080
mongo:
database: DC_myDC
uri: mongodb://oc-deploy-mongo:27017
imagePullSecrets:
- name: regcred

View File

@@ -1,5 +0,0 @@
apiVersion: v2
name: oc-mongo-express
description: A Helm chart for deploying mongo-express
version: 0.1.0
appVersion: "1.0"

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: mongo-express
spec:
selector:
app: mongo-express
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
type: {{ .Values.service.type }}

View File

@@ -1,39 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-mongo-express
labels:
app: mongo-express
spec:
serviceName: "mongo-express"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: mongo-express
template:
metadata:
labels:
app: mongo-express
spec:
containers:
- name: mongo-express
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: {{ .Values.service.targetPort }}
env:
- name: ME_CONFIG_BASICAUTH_USERNAME
valueFrom:
secretKeyRef:
name: mongo-secret
key: {{ .Values.secret.usernameKey }}
- name: ME_CONFIG_BASICAUTH_PASSWORD
valueFrom:
secretKeyRef:
name: mongo-secret
key: {{ .Values.secret.passwordKey }}
imagePullSecrets:
{{- if .Values.imagePullSecrets }}
{{- range .Values.imagePullSecrets }}
- name: {{ .name }}
{{- end }}
{{- end }}

View File

@@ -1,18 +0,0 @@
replicaCount: 1
image:
repository: mongo-express
tag: latest
pullPolicy: IfNotPresent
service:
port: 8081
targetPort: 8081
type: NodePort
imagePullSecrets:
- name: my-registry-key
secret:
usernameKey: mongo-username
passwordKey: mongo-password

View File

@@ -1,5 +0,0 @@
apiVersion: v2
name: oc-mongo
description: A Helm chart for deploying the oc-mongo component
version: 0.1.0
appVersion: "1.0"

View File

@@ -1,10 +0,0 @@
# PersistentVolumeClaim backing the mongo StatefulSet's data volume.
# (Indentation reconstructed — the rendered diff flattened the YAML.)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: {{ .Values.persistence.name }}
spec:
  accessModes:
    - {{ .Values.persistence.accessMode }}
  resources:
    requests:
      storage: {{ .Values.persistence.size }}

View File

@@ -1,8 +0,0 @@
# Opaque secret holding the mongo credentials; values are expected to be
# base64-encoded already (see secret.username / secret.password in values).
# (Indentation reconstructed — the rendered diff flattened the YAML.)
apiVersion: v1
kind: Secret
metadata:
  name: {{ .Release.Name }}-mongo-secret
type: Opaque
data:
  username: {{ .Values.secret.username }}
  password: {{ .Values.secret.password }}

View File

@@ -1,11 +0,0 @@
# ClusterIP service for the mongo StatefulSet.
# NOTE(review): a StatefulSet's serviceName usually points at a headless
# service (clusterIP: None); left as-is to preserve behavior — confirm.
# (Indentation reconstructed — the rendered diff flattened the YAML.)
apiVersion: v1
kind: Service
metadata:
  name: mongo
spec:
  selector:
    app: mongo
  ports:
    - protocol: TCP
      port: {{ .Values.service.port }}
      targetPort: {{ .Values.service.port }}

View File

@@ -1,31 +0,0 @@
# StatefulSet running a single mongo instance backed by a pre-created PVC.
# (Indentation reconstructed — the rendered diff flattened the YAML.)
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: {{ .Release.Name }}-mongo
  labels:
    app: mongo
spec:
  serviceName: "mongo"
  replicas: {{ .Values.replicaCount }}
  selector:
    matchLabels:
      app: mongo
  template:
    metadata:
      labels:
        app: mongo
    spec:
      containers:
        - name: mongo
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          ports:
            - containerPort: 27017
          volumeMounts:
            # Both mounts share one PVC; without subPath they would both
            # expose the volume root, so /data/db and /data/configdb would
            # be the same directory. subPath keeps them separate.
            - name: mongo-persistent-storage
              mountPath: /data/db
              subPath: db
            - name: mongo-persistent-storage
              mountPath: /data/configdb
              subPath: configdb
      volumes:
        - name: mongo-persistent-storage
          persistentVolumeClaim:
            claimName: {{ .Values.persistence.name }}

View File

@@ -1,19 +0,0 @@
# Default values for the oc-mongo chart.
# (Nesting reconstructed — the rendered diff flattened the YAML.)
replicaCount: 1
image:
  repository: mongo
  tag: latest
  pullPolicy: IfNotPresent
service:
  port: 27017
persistence:
  name: mongo-pvc-helm
  enabled: true
  accessMode: ReadWriteOnce
  size: 1Gi
secret:
  username: dGVzdA== # base64 encoding of 'test'
  password: dGVzdA== # base64 encoding of 'test'

View File

@@ -1,48 +0,0 @@
oc-mongo:
replicaCount: 1
image:
repository: registry.dev.svc.cluster.local:5000/mongo
tag: latest
pullPolicy: IfNotPresent
service:
port: 27017
persistence:
name: mongo-pvc-helm
enabled: true
accessMode: ReadWriteOnce
size: 1Gi
secret:
username: dGVzdA== # base64 encoding of 'test'
password: dGVzdA== # base64 encoding of 'test'
oc-mongo-express:
replicaCount: 1
image:
repository: registry.dev.svc.cluster.local:5000/mongo-express
tag: latest
pullPolicy: IfNotPresent
service:
port: 8081
targetPort: 8081
type: NodePort
imagePullSecrets:
- name: regcred
secret:
usernameKey: mongo-username
passwordKey: mongo-password
oc-catalog:
replicaCount: 1
image:
repository: registry.dev.svc.cluster.local:5000/oc-catalog
tag: latest
pullPolicy: IfNotPresent
service:
type: NodePort
port: 8087
targetPort: 8080
mongo:
database: DC_myDC
uri: mongodb://oc-catalog-mongo:27017
imagePullSecrets:
- name: regcred

View File

@@ -1,5 +0,0 @@
apiVersion: v2
name: oc-mongo-express
description: A Helm chart for deploying mongo-express
version: 0.1.0
appVersion: "1.0"

View File

@@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ .Release.Name }}-mongo-express
spec:
selector:
app: mongo-express
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.targetPort }}
type: {{ .Values.service.type }}

View File

@@ -1,39 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-mongo-express
labels:
app: mongo-express
spec:
serviceName: "{{ .Release.Name }}-mongo-express"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: mongo-express
template:
metadata:
labels:
app: mongo-express
spec:
containers:
- name: mongo-express
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: {{ .Values.service.targetPort }}
env:
- name: ME_CONFIG_BASICAUTH_USERNAME
valueFrom:
secretKeyRef:
name: mongo-secret
key: {{ .Values.secret.usernameKey }}
- name: ME_CONFIG_BASICAUTH_PASSWORD
valueFrom:
secretKeyRef:
name: mongo-secret
key: {{ .Values.secret.passwordKey }}
imagePullSecrets:
{{- if .Values.imagePullSecrets }}
{{- range .Values.imagePullSecrets }}
- name: {{ .name }}
{{- end }}
{{- end }}

View File

@@ -1,18 +0,0 @@
replicaCount: 1
image:
repository: mongo-express
tag: latest
pullPolicy: IfNotPresent
service:
port: 8081
targetPort: 8081
type: NodePort
imagePullSecrets:
- name: my-registry-key
secret:
usernameKey: mongo-username
passwordKey: mongo-password

View File

@@ -1,5 +0,0 @@
apiVersion: v2
name: oc-mongo
description: A Helm chart for deploying the oc-mongo component
version: 0.1.0
appVersion: "1.0"

View File

@@ -1,10 +0,0 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: {{ .Values.persistence.name }}
spec:
accessModes:
- {{ .Values.persistence.accessMode }}
resources:
requests:
storage: {{ .Values.persistence.size }}

View File

@@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: {{ .Release.Name }}-mongo-secret
type: Opaque
data:
username: {{ .Values.secret.username }}
password: {{ .Values.secret.password }}

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: mongo
spec:
selector:
app: mongo
ports:
- protocol: TCP
port: {{ .Values.service.port }}
targetPort: {{ .Values.service.port }}

View File

@@ -1,31 +0,0 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ .Release.Name }}-mongo
labels:
app: mongo
spec:
serviceName: "{{ .Release.Name }}-mongo"
replicas: {{ .Values.replicaCount }}
selector:
matchLabels:
app: mongo
template:
metadata:
labels:
app: mongo
spec:
containers:
- name: mongo
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
ports:
- containerPort: 27017
volumeMounts:
- name: mongo-persistent-storage
mountPath: /data/db
- name: mongo-persistent-storage
mountPath: /data/configdb
volumes:
- name: mongo-persistent-storage
persistentVolumeClaim:
claimName: {{ .Values.persistence.name }}

View File

@@ -1,19 +0,0 @@
replicaCount: 1
image:
repository: mongo
tag: latest
pullPolicy: IfNotPresent
service:
port: 27017
persistence:
name: mongo-pvc-helm
enabled: true
accessMode: ReadWriteOnce
size: 1Gi
secret:
username: dGVzdA== # base64 encoding of 'test'
password: dGVzdA== # base64 encoding of 'test'

View File

@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -1,24 +0,0 @@
apiVersion: v2
name: occhart
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"

View File

@@ -1,32 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: dex
labels:
app: dex
spec:
replicas: 1
selector:
matchLabels:
app: dex
template:
metadata:
labels:
app: dex
spec:
containers:
- name: dex
image: quay.io/dexidp/dex:v2.27.0
ports:
- containerPort: 5556
args:
- serve
- /etc/dex/cfg/config.yaml
volumeMounts:
- mountPath: /etc/dex/cfg
name: config
volumes:
- name: config
configMap:
name: dex-config

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: dex
labels:
app: dex
spec:
ports:
- port: 5556
selector:
app: dex

View File

@@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: grafana
labels:
app: grafana
spec:
replicas: 1
selector:
matchLabels:
app: grafana
template:
metadata:
labels:
app: grafana
spec:
containers:
- name: grafana
image: grafana/grafana:7.5.0
ports:
- containerPort: 3000

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: grafana
labels:
app: grafana
spec:
ports:
- port: 3000
selector:
app: grafana

View File

@@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: ldap
labels:
app: ldap
spec:
replicas: 1
selector:
matchLabels:
app: ldap
template:
metadata:
labels:
app: ldap
spec:
containers:
- name: ldap
image: osixia/openldap:1.5.0
ports:
- containerPort: 389

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: ldap
labels:
app: ldap
spec:
ports:
- port: 389
selector:
app: ldap

View File

@@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: loki
labels:
app: loki
spec:
replicas: 1
selector:
matchLabels:
app: loki
template:
metadata:
labels:
app: loki
spec:
containers:
- name: loki
image: grafana/loki:2.2.0
ports:
- containerPort: 3100

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: loki
labels:
app: loki
spec:
ports:
- port: 3100
selector:
app: loki

View File

@@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongo
labels:
app: mongo
spec:
replicas: 1
selector:
matchLabels:
app: mongo
template:
metadata:
labels:
app: mongo
spec:
containers:
- name: mongo
image: mongo:4.4
ports:
- containerPort: 27017

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: mongo
labels:
app: mongo
spec:
ports:
- port: 27017
selector:
app: mongo

View File

@@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nats
labels:
app: nats
spec:
replicas: 1
selector:
matchLabels:
app: nats
template:
metadata:
labels:
app: nats
spec:
containers:
- name: nats
image: nats:2.1.9
ports:
- containerPort: 4222

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nats
labels:
app: nats
spec:
ports:
- port: 4222
selector:
app: nats

View File

@@ -1,37 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: traefik
labels:
app: traefik
spec:
replicas: 1
selector:
matchLabels:
app: traefik
template:
metadata:
labels:
app: traefik
spec:
containers:
- name: traefik
image: traefik:v2.4
ports:
- name: web
containerPort: 80
- name: admin
containerPort: 8080
args:
- --entrypoints.web.address=:80
- --entrypoints.websecure.address=:443
- --providers.kubernetescrd
- --api
volumeMounts:
- mountPath: /etc/traefik
name: traefik-config
volumes:
- name: traefik-config
configMap:
name: traefik-config

View File

@@ -1,81 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: traefik-ingress
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web
spec:
rules:
- host: <your-domain>
http:
paths:
- path: /front
pathType: Prefix
backend:
service:
name: front-service
port:
number: 80
- path: /back1
pathType: Prefix
backend:
service:
name: back1-service
port:
number: 80
- path: /back2
pathType: Prefix
backend:
service:
name: back2-service
port:
number: 80
- path: /mongo
pathType: Prefix
backend:
service:
name: mongo
port:
number: 27017
- path: /nats
pathType: Prefix
backend:
service:
name: nats
port:
number: 4222
- path: /zinc
pathType: Prefix
backend:
service:
name: zinc
port:
number: 4080
- path: /dex
pathType: Prefix
backend:
service:
name: dex
port:
number: 5556
- path: /ldap
pathType: Prefix
backend:
service:
name: ldap
port:
number: 389
- path: /grafana
pathType: Prefix
backend:
service:
name: grafana
port:
number: 3000
- path: /loki
pathType: Prefix
backend:
service:
name: loki
port:
number: 3100

View File

@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: traefik
labels:
app: traefik
spec:
type: LoadBalancer
ports:
- port: 80
name: web
targetPort: 80
- port: 8080
name: admin
targetPort: 8080
selector:
app: traefik

View File

@@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: zinc
labels:
app: zinc
spec:
replicas: 1
selector:
matchLabels:
app: zinc
template:
metadata:
labels:
app: zinc
spec:
containers:
- name: zinc
image: public.ecr.aws/zinclabs/zinc:latest
ports:
- containerPort: 4080

View File

@@ -1,11 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: zinc
labels:
app: zinc
spec:
ports:
- port: 4080
selector:
app: zinc

View File

@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "occhart.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "occhart.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "occhart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "occhart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "occhart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "occhart.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "occhart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "occhart.labels" -}}
helm.sh/chart: {{ include "occhart.chart" . }}
{{ include "occhart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "occhart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "occhart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "occhart.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "occhart.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View File

@@ -1,68 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "occhart.fullname" . }}
labels:
{{- include "occhart.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "occhart.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "occhart.labels" . | nindent 8 }}
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "occhart.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
livenessProbe:
{{- toYaml .Values.livenessProbe | nindent 12 }}
readinessProbe:
{{- toYaml .Values.readinessProbe | nindent 12 }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.volumeMounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -1,32 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "occhart.fullname" . }}
labels:
{{- include "occhart.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "occhart.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

View File

@@ -1,61 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "occhart.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "occhart.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "occhart.fullname" . }}
labels:
{{- include "occhart.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "occhart.selectorLabels" . | nindent 4 }}

View File

@@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "occhart.serviceAccountName" . }}
labels:
{{- include "occhart.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
automountServiceAccountToken: {{ .Values.serviceAccount.automount }}
{{- end }}

View File

@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "occhart.fullname" . }}-test-connection"
labels:
{{- include "occhart.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "occhart.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

View File

@@ -1,107 +0,0 @@
# Default values for occhart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Automatically mount a ServiceAccount's API credentials?
automount: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podLabels: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
# Additional volumes on the output Deployment definition.
volumes: []
# - name: foo
# secret:
# secretName: mysecret
# optional: false
# Additional volumeMounts on the output Deployment definition.
volumeMounts: []
# - name: foo
# mountPath: "/etc/foo"
# readOnly: true
nodeSelector: {}
tolerations: []
affinity: {}

60
k8s/README.md Normal file
View File

@@ -0,0 +1,60 @@
## TO START DEMO
`./start-demo.sh <num of cluster>`
To navigate between clusters : `export KUBECONFIG=configCluster<n>`
After mongodb pod launch on every cluster:
`./add-datas-demo.sh <num of cluster>`
## Deploy the opencloud chart
```
./start.sh <mode: dev|prod default:dev> <branch | default:main>
```
Feel free to modify/create a new opencloud/dev-values.yaml. Provided setup should work out of the box, but is not suitable for production usage.
## k8s deployment
- Pull the oc-k8s file and put it in /usr/local/bin
- oc-k8s create values <namespace>
## Hostname settings
Edit your /etc/hosts file, and add following line:
```
127.0.0.1 beta.opencloud.com
```
## Done
Everything should be operational now, go to http://beta.opencloud.com and enjoy the ride
# Prebuilt microservices deployment procedure
TODO
# First steps
Go to http://beta.opencloud.com/users
Log in using default user/password combo ldapadmin/ldapadmin
Create a new user, or change the default one
Go to http://beta.opencloud.com
Log in using your fresh credentials
Do stuff
You can go to http://beta.opencloud.com/mongoexpress
... for mongo express web client access (default login/password is test/testme)
You can go to http://localhost/dashboard/
... for access to Traefik reverse proxy front-end

9
k8s/add-datas-demo.sh Executable file
View File

@@ -0,0 +1,9 @@
#!/bin/bash
# Seed demo data into every demo cluster by running "oc-k8s upgrade db"
# against cluster-1 .. cluster-N, each via its own kubeconfig.
#
# Usage: ./add-datas-demo.sh [num_clusters]   (default: 1)
#
# Abort on the first failing cluster instead of silently continuing.
set -euo pipefail

start=1
end=${1:-1}

for ((i = start; i <= end; i++)); do
  # Each demo cluster keeps its kubeconfig under ~/.kube/configCluster<n>.
  export KUBECONFIG="$HOME/.kube/configCluster$i"
  oc-k8s upgrade db -d opencloud -r "cluster-$i" -n "cluster-$i" -f "./datas/cluster-$i"
done

656
k8s/cluster-1-values.yaml Normal file
View File

@@ -0,0 +1,656 @@
env: cluster-1 # For storage class provisioning
clusterName: cluster-1
hostNetwork: true
host: beta.opencloud.com
hostPort: 9600
registryHost: opencloudregistry
scheme: http
secrets:
keys:
enabled: true
name: libp2p-keys
mountPath: ./pem
psk:
enabled: true
name: libp2p-psk
mountPath: ./psk
mongo-express:
enabled: true
mongodbServer: "cluster-1-mongodb.cluster-1" # TO LOOK AFTER
mongodbPort: 27017
mongodbEnableAdmin: true
mongodbAdminUsername: admin
mongodbAdminPassword: admin
siteBaseUrl: /mongoexpress
basicAuthUsername: admin
basicAuthPassword: admin
mongodb:
enabled: false
mongodb:
enabled: true
global:
defaultStorageClass: "standard"
storageClass: "standard"
architecture: standalone
useStatefulSet: false
auth:
enabled: true
rootUser: admin
rootPassword: admin
databases: [ opencloud ]
usernames: [ admin ]
passwords: [ admin ]
resourcesPreset: "small"
replicaCount: 1
persistence:
enabled: true
create: false # do not auto-create
existingClaim: mongo-pvc
storageClassName: "standard"
accessModes:
- ReadWriteOnce
size: 5Gi
persistentVolumeClaimRetentionPolicy:
enabled: true
whenDeleted: Retain
whenScaled: Retain
arbiter:
enabled: false
livenessProbe:
enabled: true
readinessProbe:
enabled: true
nats:
enabled: true
extraEnv:
- name: NATS_MAX_FILE_DESCRIPTORS
value: "65536"
extraVolumeMounts:
- name: nats-config
mountPath: /etc/nats
config:
jetstream:
enabled: true
fileStore:
enabled: true
dir: /data/jetstream # mountPath used by template
# pvc block must live here
pvc:
enabled: true
# if you already created the claim, set existingClaim:
existingClaim: nats-pvc
# storageClassName: local-path or standard (use the SC in your cluster)
storageClassName: standard
size: 50Gi
# name is the volume name used in volumeMounts; keep it simple
name: nats-jetstream
openldap:
enabled: true
test:
enabled: false
ltb-passwd:
enabled: false
replicaCount: 1
image:
repository: osixia/openldap
tls:
enabled: false
env:
LDAP_ORGANISATION: Opencloud
LDAP_DOMAIN: opencloud.com
LDAP_BACKEND: "mdb"
LDAP_TLS: "false"
LDAP_TLS_ENFORCE: "false"
LDAP_REMOVE_CONFIG_AFTER_SETUP: "true"
adminPassword: admin
configPassword: configadmin
phpldapadmin:
enabled: false
persistence:
enabled: true
create: false # do not auto-create
existingClaim: openldap-pvc
accessMode: ReadWriteOnce
size: 10Mi
storageClassName: ""
replication:
enabled: false
externalLDAP:
enabled: false
url: 389
bindDN: uid=admin,dc=opencloud,dc=com
bindPassword: admin
customLdifFiles:
01-schema.ldif: |-
dn: ou=groups,dc=opencloud,dc=com
objectClass: organizationalUnit
ou: groups
dn: ou=users,dc=opencloud,dc=com
objectClass: organizationalUnit
ou: users
dn: cn=lastGID,dc=opencloud,dc=com
objectClass: device
objectClass: top
description: Records the last GID used to create a Posix group. This prevents the re-use of a GID from a deleted group.
cn: lastGID
serialNumber: 2001
dn: cn=lastUID,dc=opencloud,dc=com
objectClass: device
objectClass: top
serialNumber: 2001
description: Records the last UID used to create a Posix account. This prevents the re-use of a UID from a deleted account.
cn: lastUID
dn: cn=everybody,ou=groups,dc=opencloud,dc=com
objectClass: top
objectClass: posixGroup
cn: everybody
memberUid: admin
gidNumber: 2003
02-ldapadmin.ldif : |-
dn: cn=ldapadmin,ou=groups,dc=opencloud,dc=com
objectClass: top
objectClass: posixGroup
cn: ldapadmin
memberUid: ldapadmin
gidNumber: 2001
dn: uid=ldapadmin,ou=users,dc=opencloud,dc=com
givenName: ldap
sn: admin
uid: ldapadmin
cn: ldapadmin
mail: ldapadmin@example.com
objectClass: person
objectClass: inetOrgPerson
objectClass: posixAccount
userPassword: sai1yeiT
uidNumber: 2001
gidNumber: 2001
loginShell: /bin/bash
homeDirectory: /home/ldapadmin
03-opencloudadmin.ldif : |-
dn: uid=admin,ou=users,dc=opencloud,dc=com
objectClass: inetOrgPerson
cn: Admin
sn: Istrator
uid: admin
userPassword: {SSHA}HMWJO7XCw80he2lqMf0PHzvvF14p6aLE
mail: morgane.roques@irt-saintexupery.com
ou: users
dn: ou=AppRoles,dc=opencloud,dc=com
objectClass: organizationalunit
ou: AppRoles
description: AppRoles
dn: ou=Opencloud,ou=AppRoles,dc=opencloud,dc=com
objectClass: organizationalunit
ou: Opencloud
description: Opencloud
prometheus:
enabled: true
enableTraefikProxyIntegration: true
server:
persistentVolume:
enabled: true
size: 5Gi
service:
type: ClusterIP
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 128m
memory: 256Mi
# ldap user manager configuration
ldapUserManager:
enabled: true
env:
SERVER_HOSTNAME: ldap.opencloud.com
LDAP_BASE_DN: dc=opencloud,dc=com
LDAP_REQUIRE_STARTTLS: "false"
LDAP_ADMINS_GROUP: ldapadmin
LDAP_ADMIN_BIND_DN: cn=admin,dc=opencloud,dc=com
LDAP_ADMIN_BIND_PWD: "{SSHA}HMWJO7XCw80he2lqMf0PHzvvF14p6aLE"
LDAP_IGNORE_CERT_ERRORS: "true"
EMAIL_DOMAIN: ""
NO_HTTPS: "true"
SERVER_PATH: "/users"
ORGANISATION_NAME: Opencloud
LDAP_USER_OU: users
LDAP_GROUP_OU: groups
ACCEPT_WEAK_PASSWORDS: "true"
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
traefik:
enabled: true
service:
type: NodePort
ports:
web:
port: 80
nodePort: 30950
websecure:
port: 443
nodePort: 30951
ingressRoute:
dashboard:
enabled: true
matchRule: Host(`localhost`) && PathPrefix(`/api`) || PathPrefix(`/dashboard`)
entryPoints: [web]
ports:
web:
port: 80
nodePort: 30950
websecure:
port: 443
nodePort: 30951
hydra:
enabled: true
maester:
enabled: true
secret:
enabled: false
nameOverride: hydra-secret
hashSumEnabled: false
hydra:
dev: true
existingSecret: hydra-secret
config:
dsn: memory
urls:
# login: https://localhost-login/authentication/login
# consent: https://localhost-consent/consent/consent
# logout: https://localhost-logout/authentication/logout
self:
issuer: "http://cluster-1-hydra-public.cluster-1:4444/"
keto:
enabled: true
keto:
config:
serve:
read:
port: 4466
write:
port: 4467
metrics:
port: 4468
namespaces:
- id: 0
name: open-cloud
dsn: memory
loki:
enabled: true
loki:
auth_enabled: false
commonConfig:
replication_factor: 1
storage:
storageClassName: standard
type: filesystem
filesystem:
chunks_directory: /var/loki/chunks
rules_directory: /var/loki/rules
admin_api_directory: /var/loki/admin
storage_config:
boltdb_shipper:
active_index_directory: /var/loki/index
filesystem:
directory: /var/loki/chunks
limits_config:
allow_structured_metadata: false
schemaConfig:
configs:
- from: "2020-01-01"
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
ingester:
chunk_encoding: snappy
tracing:
enabled: true
querier:
max_concurrent: 2
deploymentMode: SingleBinary
singleBinary:
extraVolumes:
- name: loki-storage
persistentVolumeClaim:
claimName: loki-pvc
persistence:
enabled: false # Deactivate loki auto provisioning, rely on existing PVC
accessMode: ReadWriteOnce
size: 1Gi
storageClassName: standard
create: false
claimName: loki-pvc
extraVolumeMounts:
- name: loki-storage
mountPath: /var/loki
replicas: 1
resources:
limits:
cpu: 3
memory: 4Gi
requests:
cpu: 1
memory: 0.5Gi
extraEnv:
- name: GOMEMLIMIT
value: 3750MiB
chunksCache:
# default is 500MB, with limited memory keep this smaller
writebackSizeLimit: 10MB
# Enable minio for storage
minio:
enabled: false
# Zero out replica counts of other deployment modes
backend:
replicas: 0
read:
replicas: 0
write:
replicas: 0
ingester:
replicas: 0
querier:
replicas: 0
queryFrontend:
replicas: 0
queryScheduler:
replicas: 0
distributor:
replicas: 0
compactor:
replicas: 0
indexGateway:
replicas: 0
bloomCompactor:
replicas: 0
bloomGateway:
replicas: 0
grafana:
enabled: true
adminUser: admin
adminPassword: admin
persistence:
enabled: true
size: 1Gi
service:
type: ClusterIP
argo-workflows:
enabled: false
workflow:
serviceAccount:
create: false
name: argo-workflow
rbac:
create: false # Manual provisioning
controller:
workflowNamespaces: [] #All of them
controller:
workflowDefaults:
spec:
serviceAccountName: argo-workflow
ocAuth:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-auth:latest
authType: hydra
keto:
adminRole: admin
hydra:
openCloudOauth2ClientSecretName: opencloud-oauth2-client-secret
ldap:
bindDn: cn=admin,dc=opencloud,dc=com
binPwd: admin
baseDn: dc=opencloud,dc=com
userBaseDn: ou=users,dc=opencloud,dc=com
roleBaseDn: ou=AppRoles,dc=opencloud,dc=com
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocFront:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-front:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocWorkspace:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-workspace:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocShared:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-shared:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocWorkflow:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-workflow:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocCatalog:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-catalog:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocPeer:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-peer:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocDatacenter:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-datacenter:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocDiscovery:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-schedulerd:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocSchedulerd:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-schedulerd:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocScheduler:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-scheduler:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
docker-registry-ui:
enabled: true
ui:
title: "opencloud docker registry"
proxy: true
dockerRegistryUrl: "http://cluster-1-docker-registry-ui-registry-server.cluster-1.svc.cluster.local:5000"
registry:
secretName: regcred
enabled: true
dataVolume:
persistentVolumeClaim:
claimName: docker-registry-pvc
persistence:
create: false
existingClaim: docker-registry-pvc
accessMode: ReadWriteOnce
storage: 5Gi
storageClassName: "standard"

656
k8s/cluster-2-values.yaml Normal file
View File

@@ -0,0 +1,656 @@
env: cluster-2 # For storage class provisioning
clusterName: cluster-2
hostNetwork: true
host: beta.opencloud.com
hostPort: 9700
registryHost: opencloudregistry
scheme: http
secrets:
keys:
enabled: true
name: libp2p-keys
mountPath: ./pem
psk:
enabled: true
name: libp2p-psk
mountPath: ./psk
mongo-express:
enabled: true
mongodbServer: "cluster-2-mongodb.cluster-2" # TO LOOK AFTER
mongodbPort: 27017
mongodbEnableAdmin: true
mongodbAdminUsername: admin
mongodbAdminPassword: admin
siteBaseUrl: /mongoexpress
basicAuthUsername: admin
basicAuthPassword: admin
mongodb:
enabled: false
mongodb:
enabled: true
global:
defaultStorageClass: "standard"
storageClass: "standard"
architecture: standalone
useStatefulSet: false
auth:
enabled: true
rootUser: admin
rootPassword: admin
databases: [ opencloud ]
usernames: [ admin ]
passwords: [ admin ]
resourcesPreset: "small"
replicaCount: 1
persistence:
enabled: true
create: false # do not auto-create
existingClaim: mongo-pvc
storageClassName: "standard"
accessModes:
- ReadWriteOnce
size: 5Gi
persistentVolumeClaimRetentionPolicy:
enabled: true
whenDeleted: Retain
whenScaled: Retain
arbiter:
enabled: false
livenessProbe:
enabled: true
readinessProbe:
enabled: true
nats:
enabled: true
extraEnv:
- name: NATS_MAX_FILE_DESCRIPTORS
value: "65536"
extraVolumeMounts:
- name: nats-config
mountPath: /etc/nats
config:
jetstream:
enabled: true
fileStore:
enabled: true
dir: /data/jetstream # mountPath used by template
# pvc block must live here
pvc:
enabled: true
# if you already created the claim, set existingClaim:
existingClaim: nats-pvc
# storageClassName: local-path or standard (use the SC in your cluster)
storageClassName: standard
size: 50Gi
# name is the volume name used in volumeMounts; keep it simple
name: nats-jetstream
openldap:
enabled: true
test:
enabled: false
ltb-passwd:
enabled: false
replicaCount: 1
image:
repository: osixia/openldap
tls:
enabled: false
env:
LDAP_ORGANISATION: Opencloud
LDAP_DOMAIN: opencloud.com
LDAP_BACKEND: "mdb"
LDAP_TLS: "false"
LDAP_TLS_ENFORCE: "false"
LDAP_REMOVE_CONFIG_AFTER_SETUP: "true"
adminPassword: admin
configPassword: configadmin
phpldapadmin:
enabled: false
persistence:
enabled: true
create: false # do not auto-create
existingClaim: openldap-pvc
accessMode: ReadWriteOnce
size: 10Mi
storageClassName: ""
replication:
enabled: false
externalLDAP:
enabled: false
url: 389
bindDN: uid=admin,dc=opencloud,dc=com
bindPassword: admin
customLdifFiles:
01-schema.ldif: |-
dn: ou=groups,dc=opencloud,dc=com
objectClass: organizationalUnit
ou: groups
dn: ou=users,dc=opencloud,dc=com
objectClass: organizationalUnit
ou: users
dn: cn=lastGID,dc=opencloud,dc=com
objectClass: device
objectClass: top
description: Records the last GID used to create a Posix group. This prevents the re-use of a GID from a deleted group.
cn: lastGID
serialNumber: 2001
dn: cn=lastUID,dc=opencloud,dc=com
objectClass: device
objectClass: top
serialNumber: 2001
description: Records the last UID used to create a Posix account. This prevents the re-use of a UID from a deleted account.
cn: lastUID
dn: cn=everybody,ou=groups,dc=opencloud,dc=com
objectClass: top
objectClass: posixGroup
cn: everybody
memberUid: admin
gidNumber: 2003
02-ldapadmin.ldif : |-
dn: cn=ldapadmin,ou=groups,dc=opencloud,dc=com
objectClass: top
objectClass: posixGroup
cn: ldapadmin
memberUid: ldapadmin
gidNumber: 2001
dn: uid=ldapadmin,ou=users,dc=opencloud,dc=com
givenName: ldap
sn: admin
uid: ldapadmin
cn: ldapadmin
mail: ldapadmin@example.com
objectClass: person
objectClass: inetOrgPerson
objectClass: posixAccount
userPassword: sai1yeiT
uidNumber: 2001
gidNumber: 2001
loginShell: /bin/bash
homeDirectory: /home/ldapadmin
03-opencloudadmin.ldif : |-
dn: uid=admin,ou=users,dc=opencloud,dc=com
objectClass: inetOrgPerson
cn: Admin
sn: Istrator
uid: admin
userPassword: {SSHA}HMWJO7XCw80he2lqMf0PHzvvF14p6aLE
mail: morgane.roques@irt-saintexupery.com
ou: users
dn: ou=AppRoles,dc=opencloud,dc=com
objectClass: organizationalunit
ou: AppRoles
description: AppRoles
dn: ou=Opencloud,ou=AppRoles,dc=opencloud,dc=com
objectClass: organizationalunit
ou: Opencloud
description: Opencloud
prometheus:
enabled: true
enableTraefikProxyIntegration: true
server:
persistentVolume:
enabled: true
size: 5Gi
service:
type: ClusterIP
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 128m
memory: 256Mi
# ldap user manager configuration
ldapUserManager:
enabled: true
env:
SERVER_HOSTNAME: ldap.opencloud.com
LDAP_BASE_DN: dc=opencloud,dc=com
LDAP_REQUIRE_STARTTLS: "false"
LDAP_ADMINS_GROUP: ldapadmin
LDAP_ADMIN_BIND_DN: cn=admin,dc=opencloud,dc=com
LDAP_ADMIN_BIND_PWD: "{SSHA}HMWJO7XCw80he2lqMf0PHzvvF14p6aLE"
LDAP_IGNORE_CERT_ERRORS: "true"
EMAIL_DOMAIN: ""
NO_HTTPS: "true"
SERVER_PATH: "/users"
ORGANISATION_NAME: Opencloud
LDAP_USER_OU: users
LDAP_GROUP_OU: groups
ACCEPT_WEAK_PASSWORDS: "true"
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
traefik:
enabled: true
service:
type: NodePort
ports:
web:
port: 80
nodePort: 30950
websecure:
port: 443
nodePort: 30951
ingressRoute:
dashboard:
enabled: true
matchRule: Host(`localhost`) && PathPrefix(`/api`) || PathPrefix(`/dashboard`)
entryPoints: [web]
ports:
web:
port: 80
nodePort: 30950
websecure:
port: 443
nodePort: 30951
hydra:
enabled: true
maester:
enabled: true
secret:
enabled: false
nameOverride: hydra-secret
hashSumEnabled: false
hydra:
dev: true
existingSecret: hydra-secret
config:
dsn: memory
urls:
# login: https://localhost-login/authentication/login
# consent: https://localhost-consent/consent/consent
# logout: https://localhost-logout/authentication/logout
self:
issuer: "http://cluster-2-hydra-public.cluster-2:4444/"
keto:
enabled: true
keto:
config:
serve:
read:
port: 4466
write:
port: 4467
metrics:
port: 4468
namespaces:
- id: 0
name: open-cloud
dsn: memory
loki:
enabled: true
loki:
auth_enabled: false
commonConfig:
replication_factor: 1
storage:
storageClassName: standard
type: filesystem
filesystem:
chunks_directory: /var/loki/chunks
rules_directory: /var/loki/rules
admin_api_directory: /var/loki/admin
storage_config:
boltdb_shipper:
active_index_directory: /var/loki/index
filesystem:
directory: /var/loki/chunks
limits_config:
allow_structured_metadata: false
schemaConfig:
configs:
- from: "2020-01-01"
store: boltdb-shipper
object_store: filesystem
schema: v11
index:
prefix: index_
period: 24h
ingester:
chunk_encoding: snappy
tracing:
enabled: true
querier:
max_concurrent: 2
deploymentMode: SingleBinary
singleBinary:
extraVolumes:
- name: loki-storage
persistentVolumeClaim:
claimName: loki-pvc
persistence:
enabled: false # Deactivate loki auto provisioning, rely on existing PVC
accessMode: ReadWriteOnce
size: 1Gi
storageClassName: standard
create: false
claimName: loki-pvc
extraVolumeMounts:
- name: loki-storage
mountPath: /var/loki
replicas: 1
resources:
limits:
cpu: 3
memory: 4Gi
requests:
cpu: 1
memory: 0.5Gi
extraEnv:
- name: GOMEMLIMIT
value: 3750MiB
chunksCache:
# default is 500MB, with limited memory keep this smaller
writebackSizeLimit: 10MB
# Enable minio for storage
minio:
enabled: false
# Zero out replica counts of other deployment modes
backend:
replicas: 0
read:
replicas: 0
write:
replicas: 0
ingester:
replicas: 0
querier:
replicas: 0
queryFrontend:
replicas: 0
queryScheduler:
replicas: 0
distributor:
replicas: 0
compactor:
replicas: 0
indexGateway:
replicas: 0
bloomCompactor:
replicas: 0
bloomGateway:
replicas: 0
grafana:
enabled: true
adminUser: admin
adminPassword: admin
persistence:
enabled: true
size: 1Gi
service:
type: ClusterIP
argo-workflows:
enabled: false
workflow:
serviceAccount:
create: false
name: argo-workflow
rbac:
create: false # Manual provisioning
controller:
workflowNamespaces: [] #All of them
controller:
workflowDefaults:
spec:
serviceAccountName: argo-workflow
ocAuth:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-auth:latest
authType: hydra
keto:
adminRole: admin
hydra:
openCloudOauth2ClientSecretName: opencloud-oauth2-client-secret
ldap:
bindDn: cn=admin,dc=opencloud,dc=com
binPwd: admin
baseDn: dc=opencloud,dc=com
userBaseDn: ou=users,dc=opencloud,dc=com
roleBaseDn: ou=AppRoles,dc=opencloud,dc=com
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocFront:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-front:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocWorkspace:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-workspace:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocShared:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-shared:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocWorkflow:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-workflow:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocCatalog:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-catalog:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocPeer:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-peer:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocDatacenter:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-datacenter:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocDiscovery:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-schedulerd:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocSchedulerd:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-schedulerd:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
ocScheduler:
enabled: true
enableTraefikProxyIntegration: true
image: opencloudregistry/oc-scheduler:latest
resources:
limits:
cpu: 128m
memory: 256Mi
requests:
cpu: 128m
memory: 256Mi
replicas: 1
hpa:
enabled: true
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
docker-registry-ui:
enabled: true
ui:
title: "opencloud docker registry"
proxy: true
dockerRegistryUrl: "http://cluster-2-docker-registry-ui-registry-server.cluster-2.svc.cluster.local:5000"
registry:
secretName: regcred
enabled: true
dataVolume:
persistentVolumeClaim:
claimName: docker-registry-pvc
persistence:
create: false
existingClaim: docker-registry-pvc
accessMode: ReadWriteOnce
storage: 5Gi
storageClassName: "standard"

2
k8s/conf/cluster-1.conf Normal file
View File

@@ -0,0 +1,2 @@
CLUSTER_NAME=cluster-1
PORT=9600

2
k8s/conf/cluster-2.conf Normal file
View File

@@ -0,0 +1,2 @@
CLUSTER_NAME=cluster-2
PORT=9700

Some files were not shown because too many files have changed in this diff Show More