
Commit 0c8051c

Merge pull request #400 from openinfradev/lma-ns

Change the LMA deployment namespace: the LMA stack now deploys into taco-system instead of lma.

2 parents: 34312e6 + 70c5778

File tree

4 files changed (+36 −36 lines):
  deploy_apps/tks-lma-federation-wftpl.yaml
  deploy_apps/tks-primary-cluster.yaml
  deploy_apps/tks-remove-lma-federation-wftpl.yaml
  tests/validate-service-wftpl.yaml


deploy_apps/tks-lma-federation-wftpl.yaml
Lines changed: 6 additions & 6 deletions

@@ -308,16 +308,16 @@ spec:
       kube_secret=$(kubectl get secret -n ${cluster_id} ${cluster_id}-tks-kubeconfig -o jsonpath="{.data.value}" | base64 -d)
       cat <<< "$kube_secret" > kubeconfig

-      while ! kubectl --kubeconfig=kubeconfig get svc -n lma grafana --ignore-not-found; do
+      while ! kubectl --kubeconfig=kubeconfig get svc -n taco-system grafana --ignore-not-found; do
         echo "Waiting for the grafana service to appear in cluster ${cluster_id} (5s)"
         sleep 5
       done

       grafana_ep_secret=$(kubectl get secret -n ${cluster_id} tks-endpoint-secret -o jsonpath='{.data.grafana}' | base64 -d) || grafana_ep_secret=""

       if [ "$grafana_ep_secret" == "" ]; then
-        while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma grafana -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]; do
-          if [[ "$(kubectl --kubeconfig=kubeconfig get svc -n lma grafana -o jsonpath='{.spec.type}')" != "LoadBalancer" ]]; then
+        while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n taco-system grafana -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]; do
+          if [[ "$(kubectl --kubeconfig=kubeconfig get svc -n taco-system grafana -o jsonpath='{.spec.type}')" != "LoadBalancer" ]]; then
             log "FAIL" "A service for the grafana in ${cluster_id} is not configured properly.(No LoadBalancer)"
             exit -1
           fi
@@ -326,7 +326,7 @@ spec:
           sleep 3
         done

-        endpoint=$(kubectl --kubeconfig=kubeconfig get svc -n lma grafana -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
+        endpoint=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system grafana -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
       else
         endpoint=${grafana_ep_secret}
       fi
@@ -470,12 +470,12 @@ spec:

       kube_secret=$(kubectl get secret -n ${cluster_id} ${cluster_id}-tks-kubeconfig -o jsonpath="{.data.value}" | base64 -d)
       cat <<< "$kube_secret" > kubeconfig
-      pods=$(kubectl --kubeconfig kubeconfig get pods -n lma | grep grafana | awk '{print $1}')
+      pods=$(kubectl --kubeconfig kubeconfig get pods -n taco-system | grep grafana | awk '{print $1}')

       for pod in $pods
       do
         echo "Deleting pod $pod"
-        kubectl --kubeconfig kubeconfig delete pod $pod -n lma --ignore-not-found=true
+        kubectl --kubeconfig kubeconfig delete pod $pod -n taco-system --ignore-not-found=true
       done

     envFrom:
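Note: the wait-then-resolve pattern in this file — poll until the Service exists, bail out if it is not a LoadBalancer, then read the first ingress hostname — recurs in every template touched by this commit. A minimal standalone sketch of that flow, assuming a local kubeconfig file and the post-change taco-system namespace (service name and timings are illustrative, not part of the commit):

  #!/usr/bin/env bash
  # Sketch only: mirrors the workflow's endpoint discovery logic.
  set -euo pipefail

  NS=taco-system          # namespace introduced by this commit
  SVC=grafana             # illustrative; any LoadBalancer service works
  KCFG=kubeconfig         # written earlier from the cluster's kubeconfig secret

  # Wait until the Service object exists. Note that --ignore-not-found exits 0
  # even when the object is absent, so test the output rather than the exit code.
  while [ -z "$(kubectl --kubeconfig="$KCFG" get svc -n "$NS" "$SVC" --ignore-not-found -o name)" ]; do
    echo "Waiting for service $SVC in $NS (5s)"
    sleep 5
  done

  # Fail fast when the Service can never get an external hostname.
  if [ "$(kubectl --kubeconfig="$KCFG" get svc -n "$NS" "$SVC" -o jsonpath='{.spec.type}')" != "LoadBalancer" ]; then
    echo "FAIL: $SVC in $NS is not a LoadBalancer" >&2
    exit 1
  fi

  # Poll for the first ingress hostname; quoting keeps the -z test well-formed.
  endpoint=""
  while [ -z "$endpoint" ]; do
    endpoint=$(kubectl --kubeconfig="$KCFG" get svc -n "$NS" "$SVC" \
      -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
    [ -n "$endpoint" ] || sleep 3
  done
  echo "grafana endpoint: $endpoint"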

deploy_apps/tks-primary-cluster.yaml
Lines changed: 25 additions & 25 deletions

@@ -110,9 +110,9 @@ spec:
       - name: list
         value: |
           [
-            { "app_group": "lma", "path": "minio", "namespace": "lma", "target_cluster": "" },
-            { "app_group": "lma", "path": "loki", "namespace": "lma", "target_cluster": "" },
-            { "app_group": "lma", "path": "loki-user", "namespace": "lma", "target_cluster": "" }
+            { "app_group": "lma", "path": "minio", "namespace": "taco-system", "target_cluster": "" },
+            { "app_group": "lma", "path": "loki", "namespace": "taco-system", "target_cluster": "" },
+            { "app_group": "lma", "path": "loki-user", "namespace": "taco-system", "target_cluster": "" }
           ]
         when: "{{workflow.parameters.object_store}} == minio"
@@ -148,9 +148,9 @@ spec:
       - name: list
         value: |
           [
-            { "app_group": "lma", "path": "thanos-config", "namespace": "lma", "target_cluster": "" },
-            { "app_group": "lma", "path": "thanos", "namespace": "lma", "target_cluster": "" },
-            { "app_group": "lma", "path": "grafana", "namespace": "lma", "target_cluster": "" }
+            { "app_group": "lma", "path": "thanos-config", "namespace": "taco-system", "target_cluster": "" },
+            { "app_group": "lma", "path": "thanos", "namespace": "taco-system", "target_cluster": "" },
+            { "app_group": "lma", "path": "grafana", "namespace": "taco-system", "target_cluster": "" }
           ]

   - - name: update-status-done
@@ -223,8 +223,8 @@ spec:
         value: |
           [
             { "app_group": "lma", "path": "lma-bucket", "namespace": "taco-system", "target_cluster": "" },
-            { "app_group": "lma", "path": "loki", "namespace": "lma", "target_cluster": "" },
-            { "app_group": "lma", "path": "loki-user", "namespace": "lma", "target_cluster": "" }
+            { "app_group": "lma", "path": "loki", "namespace": "taco-system", "target_cluster": "" },
+            { "app_group": "lma", "path": "loki-user", "namespace": "taco-system", "target_cluster": "" }
           ]

     - name: update-eps-for-thanos
@@ -473,9 +473,9 @@ spec:
       LOKI_SERVICE=$(kubectl get secret -n ${primary_cluster} tks-endpoint-secret -o jsonpath='{.data.loki}' | base64 -d)

       if [[ "$LOKI_SERVICE" == "" ]]; then
-        while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma loki-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
+        while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
         do
-          if [ "$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-loki-distributed-gateway -o jsonpath="{.spec.type}")" != "LoadBalancer" ]; then
+          if [ "$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-loki-distributed-gateway -o jsonpath="{.spec.type}")" != "LoadBalancer" ]; then
             log "FAIL" "The infras on primary are not configured properly.(No LoadBalancer)"
             exit -1
           fi
@@ -484,8 +484,8 @@ spec:
           sleep 3
         done

-        LOKI_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
-        LOKI_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-loki-distributed-gateway -o jsonpath="{.spec.ports[0].port}")
+        LOKI_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
+        LOKI_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-loki-distributed-gateway -o jsonpath="{.spec.ports[0].port}")
       else
         LOKI_HOST=$(echo $LOKI_SERVICE | awk -F : '{print $1}')
         LOKI_PORT=$(echo $LOKI_SERVICE | awk -F : '{print $2}')
@@ -497,9 +497,9 @@ spec:
       LOKI_USER_SERVICE=$(kubectl get secret -n ${primary_cluster} tks-endpoint-secret -o jsonpath='{.data.loki_user}' | base64 -d)

       if [[ "$LOKI_USER_SERVICE" == "" ]]; then
-        while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
+        while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-user-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
         do
-          if [ "$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.spec.type}")" != "LoadBalancer" ]; then
+          if [ "$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-user-loki-distributed-gateway -o jsonpath="{.spec.type}")" != "LoadBalancer" ]; then
             log "FAIL" "The infras on primary are not configured properly.(No LoadBalancer)"
             exit -1
           fi
@@ -508,8 +508,8 @@ spec:
           sleep 3
         done

-        LOKI_USER_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
-        LOKI_USER_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.spec.ports[0].port}")
+        LOKI_USER_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-user-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
+        LOKI_USER_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-user-loki-distributed-gateway -o jsonpath="{.spec.ports[0].port}")
       else
         LOKI_USER_HOST=$(echo $LOKI_USER_SERVICE | awk -F : '{print $1}')
         LOKI_USER_PORT=$(echo $LOKI_USER_SERVICE | awk -F : '{print $2}')
@@ -521,8 +521,8 @@ spec:
       if [[ "${primary_cluster}" == "${current_cluster}" ]] && [[ "$OBJECT_STORE" == "minio" ]]; then
         S3_SERVICE=$(kubectl get secret -n ${primary_cluster} tks-endpoint-secret -o jsonpath='{.data.minio}' | base64 -d)
         if [[ "$S3_SERVICE" == "" ]]; then
-          S3_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n lma minio -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
-          S3_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n lma minio -o jsonpath="{.spec.ports[0].port}")
+          S3_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system minio -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
+          S3_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system minio -o jsonpath="{.spec.ports[0].port}")
           S3_SERVICE=${S3_HOST}:${S3_PORT}
         fi
       fi
@@ -741,10 +741,10 @@ spec:
       PROMETHEUS_URL=$(kubectl get secret -n ${member} tks-endpoint-secret -o jsonpath='{.data.prometheus}' | base64 -d)
       if [[ "$PROMETHEUS_URL" != "" ]]; then
         eplist="${eplist}, \"${PROMETHEUS_URL}\""
-      elif [ `kubectl --kubeconfig=kubeconfig get svc -n lma lma-thanos-external --ignore-not-found=true | grep -v NAME | wc -l` -eq 1 ]; then
-        while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma lma-thanos-external -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
+      elif [ `kubectl --kubeconfig=kubeconfig get svc -n taco-system lma-thanos-external --ignore-not-found=true | grep -v NAME | wc -l` -eq 1 ]; then
+        while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n taco-system lma-thanos-external -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
         do
-          if [ "$(kubectl --kubeconfig=kubeconfig get svc -n lma lma-thanos-external -o jsonpath="{.spec.type}")" != "LoadBalancer" ]; then
+          if [ "$(kubectl --kubeconfig=kubeconfig get svc -n taco-system lma-thanos-external -o jsonpath="{.spec.type}")" != "LoadBalancer" ]; then
             log "FAIL" "A service for the thanos-sidecar in ${member} is not configured properly.(No LoadBalancer)"
             exit -1
           fi
@@ -753,7 +753,7 @@ spec:
           sleep 3
         done

-        eplist="${eplist}, \"$(kubectl --kubeconfig=kubeconfig get svc -n lma lma-thanos-external -o jsonpath="{.status.loadBalancer.ingress[0].hostname}"):$(kubectl --kubeconfig=kubeconfig get svc -n lma lma-thanos-external -o jsonpath="{.spec.ports[0].port}")\""
+        eplist="${eplist}, \"$(kubectl --kubeconfig=kubeconfig get svc -n taco-system lma-thanos-external -o jsonpath="{.status.loadBalancer.ingress[0].hostname}"):$(kubectl --kubeconfig=kubeconfig get svc -n taco-system lma-thanos-external -o jsonpath="{.spec.ports[0].port}")\""
       else
         log "WARN" "Cluster(${member}) has no prometheus sidecar"
       fi
@@ -1068,7 +1068,7 @@ spec:
       kubectl --kubeconfig /kube/value get secret -n ${cluster_id} ${cluster_id}-tks-kubeconfig -o jsonpath="{.data.value}" | base64 -d > ~/kubeconfig
       export KUBECONFIG=~/kubeconfig

-      kubectl delete job -n lma empty-buckets-before-delete-them --ignore-not-found=true
+      kubectl delete job -n taco-system empty-buckets-before-delete-them --ignore-not-found=true
       cat <<EOF | kubectl apply -f -
       apiVersion: batch/v1
       kind: Job
@@ -1099,12 +1099,12 @@ spec:
         backoffLimit: 4
       EOF

-      while [ $(kubectl get -n lma job empty-buckets-before-delete-them -o jsonpath='{.status.conditions[0].type}' | wc -c) -eq 0 ]; do
+      while [ $(kubectl get -n taco-system job empty-buckets-before-delete-them -o jsonpath='{.status.conditions[0].type}' | wc -c) -eq 0 ]; do
         echo "The job has not completed yet; waiting 10 seconds..."
         sleep 10
       done

-      if [[ $(kubectl get -n lma job empty-buckets-before-delete-them -o jsonpath='{.status.conditions[0].type}') == "Complete" ]]; then
+      if [[ $(kubectl get -n taco-system job empty-buckets-before-delete-them -o jsonpath='{.status.conditions[0].type}') == "Complete" ]]; then
         exit 0
       else
         exit -1
deploy_apps/tks-remove-lma-federation-wftpl.yaml
Lines changed: 4 additions & 4 deletions

@@ -336,15 +336,15 @@ spec:
       kube_secret=$(kubectl get secret -n ${cluster_id} ${cluster_id}-tks-kubeconfig -o jsonpath="{.data.value}" | base64 -d)
       cat <<< "$kube_secret" > kubeconfig

-      while ! kubectl --kubeconfig=kubeconfig get svc -n lma grafana --ignore-not-found; do
+      while ! kubectl --kubeconfig=kubeconfig get svc -n taco-system grafana --ignore-not-found; do
         echo "Waiting for the grafana service to appear in cluster ${cluster_id} (5s)"
         sleep 5
       done

       grafana_ep_secret=$(kubectl get secret -n ${cluster_id} tks-endpoint-secret -o jsonpath='{.data.grafana}' | base64 -d) || grafana_ep_secret=""
       if [ "$grafana_ep_secret" == "" ]; then
-        while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma grafana -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]; do
-          if [ "$(kubectl --kubeconfig=kubeconfig get svc -n lma grafana -o jsonpath='{.spec.type}')" != "LoadBalancer" ]; then
+        while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n taco-system grafana -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]; do
+          if [ "$(kubectl --kubeconfig=kubeconfig get svc -n taco-system grafana -o jsonpath='{.spec.type}')" != "LoadBalancer" ]; then
             log "FAIL" "A service for the grafana in ${cluster_id} is not configured properly.(No LoadBalancer)"
             exit -1
           fi
@@ -353,7 +353,7 @@ spec:
           sleep 3
         done

-        endpoint=$(kubectl --kubeconfig=kubeconfig get svc -n lma grafana -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
+        endpoint=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system grafana -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
       else
         endpoint=${grafana_ep_secret}
       fi
tests/validate-service-wftpl.yaml
Lines changed: 1 addition & 1 deletion

@@ -48,7 +48,7 @@ spec:
         exit 1
       fi

-      kubectl get po -n lma | grep -Ev "Running|Completed|NAME"
+      kubectl get po -n taco-system | grep -Ev "Running|Completed|NAME"
       if [[ $? != 1 ]]; then
         echo "ERROR - invalid pod status"
         exit 1
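The validation above leans on grep's exit status: 1 means nothing matched the unhealthy patterns (the desired outcome), while 0 means some pod is in another state. An equivalent sketch that also captures the offending pods for the error output (illustrative; --no-headers replaces the NAME filter):

  # Sketch only: fail when any pod in taco-system is neither Running nor Completed.
  # "|| true" keeps set -e scripts alive when grep matches nothing (exit 1).
  bad_pods=$(kubectl get po -n taco-system --no-headers | grep -Ev "Running|Completed" || true)
  if [ -n "$bad_pods" ]; then
    echo "ERROR - invalid pod status"
    echo "$bad_pods"
    exit 1
  fi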
