added the methods that write the appropriate Argo annotations when using an S3 storage as the artifact repository, with hardcoded credentials
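
As a rough, non-authoritative illustration of what the commit message describes: the Go sketch below shows what an S3 artifact-repository section can look like once marshalled to YAML for an Argo workflow. The struct and field names follow the public Argo Workflows S3 artifact schema, not this repository's own code, and the endpoint, bucket, key, and secret names are placeholder assumptions.

// Illustrative sketch only: minimal structs mirroring the Argo Workflows S3
// artifact-repository schema. Names and values are assumptions, not types
// taken from this repository.
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// secretRef mimics the {name, key} pair Argo uses to point at credentials
// stored in a Kubernetes Secret.
type secretRef struct {
	Name string `yaml:"name"`
	Key  string `yaml:"key"`
}

// s3Repository mirrors the fields of an Argo S3 artifact-repository entry.
type s3Repository struct {
	Endpoint        string    `yaml:"endpoint"`
	Bucket          string    `yaml:"bucket"`
	Insecure        bool      `yaml:"insecure"`
	Key             string    `yaml:"key"`
	AccessKeySecret secretRef `yaml:"accessKeySecret"`
	SecretKeySecret secretRef `yaml:"secretKeySecret"`
}

func main() {
	// Hardcoded placeholder values, standing in for the credentials the
	// commit message mentions; a real deployment would load these from
	// configuration or a Secret.
	repo := s3Repository{
		Endpoint:        "minio.example.svc.cluster.local:9000", // assumed endpoint
		Bucket:          "workflow-artifacts",                   // assumed bucket name
		Insecure:        true,
		Key:             "{{workflow.name}}/outputs",
		AccessKeySecret: secretRef{Name: "artifact-s3-creds", Key: "access-key"},
		SecretKeySecret: secretRef{Name: "artifact-s3-creds", Key: "secret-key"},
	}

	out, err := yaml.Marshal(map[string]interface{}{"s3": repo})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

Note that the Argo schema itself references credentials through a Kubernetes Secret (accessKeySecret/secretKeySecret); "hardcoded credentials" in this commit presumably means those values are fixed in the generated output rather than configurable, but the code implementing that is not part of the diff shown below.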

Author: pb
Date: 2025-06-20 11:28:12 +02:00
parent 1e4011d5b1
commit a46708842b
6 changed files with 156 additions and 106 deletions

main.go (20 lines changed)

@@ -102,16 +102,16 @@ func main() {
workflowName = getContainerName(argoFilePath)
-	if conf.GetConfig().KubeHost == "" {
-		// Not in a k8s environment, get conf from parameters
-		logger.Info().Msg("Executes outside of k8s")
-		executeOutside(argoFilePath, builder.Workflow)
-	} else {
-		// Executed in a k8s environment
-		logger.Info().Msg("Executes inside a k8s")
-		// executeInside(exec.GetID(), "argo", argo_file_path, stepMax) // commenting to use conf.ExecutionID instead of exec.GetID()
-		executeInside(conf.GetConfig().ExecutionID, exec.ExecutionsID, argoFilePath)
-	}
+	// if conf.GetConfig().KubeHost == "" {
+	// 	// Not in a k8s environment, get conf from parameters
+	// 	logger.Info().Msg("Executes outside of k8s")
+	// 	executeOutside(argoFilePath, builder.Workflow)
+	// } else {
+	// 	// Executed in a k8s environment
+	// 	logger.Info().Msg("Executes inside a k8s")
+	// 	// executeInside(exec.GetID(), "argo", argo_file_path, stepMax) // commenting to use conf.ExecutionID instead of exec.GetID()
+	// 	executeInside(conf.GetConfig().ExecutionID, exec.ExecutionsID, argoFilePath)
+	// }
}
// So far we only log the output from