@@ -110,9 +110,9 @@ spec:
         - name : list
           value : |
             [
-              { "app_group": "lma", "path": "minio", "namespace": "lma", "target_cluster": "" },
-              { "app_group": "lma", "path": "loki", "namespace": "lma", "target_cluster": "" },
-              { "app_group": "lma", "path": "loki-user", "namespace": "lma", "target_cluster": "" }
+              { "app_group": "lma", "path": "minio", "namespace": "taco-system", "target_cluster": "" },
+              { "app_group": "lma", "path": "loki", "namespace": "taco-system", "target_cluster": "" },
+              { "app_group": "lma", "path": "loki-user", "namespace": "taco-system", "target_cluster": "" }
             ]
           when : " {{workflow.parameters.object_store}} == minio"
 
@@ -148,9 +148,9 @@ spec:
         - name : list
           value : |
             [
-              { "app_group": "lma", "path": "thanos-config", "namespace": "lma", "target_cluster": "" },
-              { "app_group": "lma", "path": "thanos", "namespace": "lma", "target_cluster": "" },
-              { "app_group": "lma", "path": "grafana", "namespace": "lma", "target_cluster": "" }
+              { "app_group": "lma", "path": "thanos-config", "namespace": "taco-system", "target_cluster": "" },
+              { "app_group": "lma", "path": "thanos", "namespace": "taco-system", "target_cluster": "" },
+              { "app_group": "lma", "path": "grafana", "namespace": "taco-system", "target_cluster": "" }
             ]
 
     - - name : update-status-done
@@ -223,8 +223,8 @@ spec:
           value : |
             [
               { "app_group": "lma", "path": "lma-bucket", "namespace": "taco-system", "target_cluster": "" },
-              { "app_group": "lma", "path": "loki", "namespace": "lma", "target_cluster": "" },
-              { "app_group": "lma", "path": "loki-user", "namespace": "lma", "target_cluster": "" }
+              { "app_group": "lma", "path": "loki", "namespace": "taco-system", "target_cluster": "" },
+              { "app_group": "lma", "path": "loki-user", "namespace": "taco-system", "target_cluster": "" }
             ]
 
       - name : update-eps-for-thanos
@@ -473,9 +473,9 @@ spec:
           LOKI_SERVICE=$(kubectl get secret -n ${primary_cluster} tks-endpoint-secret -o jsonpath='{.data.loki}'| base64 -d )
 
           if [[ "$LOKI_SERVICE" == "" ]]; then
-            while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma loki-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
+            while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
             do
-              if [ "$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-loki-distributed-gateway -o jsonpath="{.spec.type}")" -neq "LoadBalancer" ]; then
+              if [ "$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-loki-distributed-gateway -o jsonpath="{.spec.type}")" != "LoadBalancer" ]; then
                 log "FAIL" "The infras on primary are not configured properly. (No LoadBalancer)"
                 exit -1
               fi
@@ -484,8 +484,8 @@ spec:
              sleep 3
            done
 
-           LOKI_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
-           LOKI_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-loki-distributed-gateway -o jsonpath="{.spec.ports[0].port}")
+           LOKI_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
+           LOKI_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-loki-distributed-gateway -o jsonpath="{.spec.ports[0].port}")
          else
            LOKI_HOST=$(echo $LOKI_SERVICE | awk -F : '{print $1}')
            LOKI_PORT=$(echo $LOKI_SERVICE | awk -F : '{print $2}')
@@ -497,9 +497,9 @@ spec:
           LOKI_USER_SERVICE=$(kubectl get secret -n ${primary_cluster} tks-endpoint-secret -o jsonpath='{.data.loki_user}'| base64 -d )
 
           if [[ "$LOKI_USER_SERVICE" == "" ]]; then
-            while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
+            while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-user-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
             do
-              if [ "$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.spec.type}")" -neq "LoadBalancer" ]; then
+              if [ "$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-user-loki-distributed-gateway -o jsonpath="{.spec.type}")" != "LoadBalancer" ]; then
                 log "FAIL" "The infras on primary are not configured properly. (No LoadBalancer)"
                 exit -1
               fi
@@ -508,8 +508,8 @@ spec:
              sleep 3
            done
 
-           LOKI_USER_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
-           LOKI_USER_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n lma loki-user-loki-distributed-gateway -o jsonpath="{.spec.ports[0].port}")
+           LOKI_USER_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-user-loki-distributed-gateway -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
+           LOKI_USER_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system loki-user-loki-distributed-gateway -o jsonpath="{.spec.ports[0].port}")
          else
            LOKI_USER_HOST=$(echo $LOKI_USER_SERVICE | awk -F : '{print $1}')
            LOKI_USER_PORT=$(echo $LOKI_USER_SERVICE | awk -F : '{print $2}')
@@ -521,8 +521,8 @@ spec:
          if [[ "${primary_cluster}" == "${current_cluster}" ]] && [[ "$OBJECT_STORE" == "minio" ]]; then
            S3_SERVICE=$(kubectl get secret -n ${primary_cluster} tks-endpoint-secret -o jsonpath='{.data.minio}'| base64 -d )
            if [[ "$S3_SERVICE" == "" ]]; then
-             S3_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n lma minio -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
-             S3_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n lma minio -o jsonpath="{.spec.ports[0].port}")
+             S3_HOST=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system minio -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
+             S3_PORT=$(kubectl --kubeconfig=kubeconfig get svc -n taco-system minio -o jsonpath="{.spec.ports[0].port}")
              S3_SERVICE=${S3_HOST}:${S3_PORT}
            fi
          fi
@@ -741,10 +741,10 @@ spec:
           PROMETHEUS_URL=$(kubectl get secret -n ${member} tks-endpoint-secret -o jsonpath='{.data.prometheus}'| base64 -d )
           if [[ "$PROMETHEUS_URL" != "" ]]; then
             eplist="${eplist}, \"${PROMETHEUS_URL}\""
-          elif [ `kubectl --kubeconfig=kubeconfig get svc -n lma lma-thanos-external --ignore-not-found=true | grep -v NAME | wc -l ` -eq 1 ]; then
-            while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n lma lma-thanos-external -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
+          elif [ `kubectl --kubeconfig=kubeconfig get svc -n taco-system lma-thanos-external --ignore-not-found=true | grep -v NAME | wc -l ` -eq 1 ]; then
+            while [ -z $(kubectl --kubeconfig=kubeconfig get svc -n taco-system lma-thanos-external -o jsonpath="{.status.loadBalancer.ingress[*].hostname}") ]
             do
-              if [ "$(kubectl --kubeconfig=kubeconfig get svc -n lma lma-thanos-external -o jsonpath="{.spec.type}")" -neq "LoadBalancer" ]; then
+              if [ "$(kubectl --kubeconfig=kubeconfig get svc -n taco-system lma-thanos-external -o jsonpath="{.spec.type}")" != "LoadBalancer" ]; then
                 log "FAIL" "A service for the thanos-sidecar in ${member} is not configured properly. (No LoadBalancer)"
                 exit -1
               fi
@@ -753,7 +753,7 @@ spec:
              sleep 3
            done
 
-           eplist="${eplist}, \"$(kubectl --kubeconfig=kubeconfig get svc -n lma lma-thanos-external -o jsonpath="{.status.loadBalancer.ingress[0].hostname}"):$(kubectl --kubeconfig=kubeconfig get svc -n lma lma-thanos-external -o jsonpath="{.spec.ports[0].port}")\""
+           eplist="${eplist}, \"$(kubectl --kubeconfig=kubeconfig get svc -n taco-system lma-thanos-external -o jsonpath="{.status.loadBalancer.ingress[0].hostname}"):$(kubectl --kubeconfig=kubeconfig get svc -n taco-system lma-thanos-external -o jsonpath="{.spec.ports[0].port}")\""
          else
            log "WARN" "Cluster(${member}) has no prometheus sidecar"
          fi
@@ -1068,7 +1068,7 @@ spec:
         kubectl --kubeconfig /kube/value get secret -n ${cluster_id} ${cluster_id}-tks-kubeconfig -o jsonpath="{.data.value}" | base64 -d > ~/kubeconfig
         export KUBECONFIG=~/kubeconfig
 
-        kubectl delete job -n lma empty-buckets-before-delete-them --ignore-not-found=true
+        kubectl delete job -n taco-system empty-buckets-before-delete-them --ignore-not-found=true
         cat <<EOF | kubectl apply -f -
         apiVersion: batch/v1
         kind: Job
@@ -1099,12 +1099,12 @@ spec:
           backoffLimit: 4
         EOF
 
-        while [ $(kubectl get -n lma job empty-buckets-before-delete-them -o jsonpath='{.status.conditions[0].type}'|wc -c) -eq 0 ]; do
+        while [ $(kubectl get -n taco-system job empty-buckets-before-delete-them -o jsonpath='{.status.conditions[0].type}'|wc -c) -eq 0 ]; do
           echo "A job is not completed. wait 10 seconds...."
           sleep 10
         done
 
-        if [[ $(kubectl get -n lma job empty-buckets-before-delete-them -o jsonpath='{.status.conditions[0].type}') == "Complete" ]]; then
+        if [[ $(kubectl get -n taco-system job empty-buckets-before-delete-them -o jsonpath='{.status.conditions[0].type}') == "Complete" ]]; then
           exit 0
         else
           exit -1