Update Oclib for event generation
@@ -14,61 +14,60 @@ import (
     tools "cloud.o-forge.io/core/oc-lib/tools"
 )

 type AdmiraltySetter struct {
-    Id string // ID to identify the execution, correspond to workflow_executions id
-    NodeName string // Allows to retrieve the name of the node used for this execution on each peer {"peerId": "nodeName"}
+    Id       string // ID to identify the execution, correspond to workflow_executions id
+    NodeName string // Allows to retrieve the name of the node used for this execution on each peer {"peerId": "nodeName"}
 }

-func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string,remotePeerID string) error {
+func (s *AdmiraltySetter) InitializeAdmiralty(localPeerID string, remotePeerID string) error {

     logger := logs.GetLogger()

-    data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(remotePeerID)
+    data := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", localPeerID, nil, nil).LoadOne(remotePeerID)
     if data.Code != 200 {
         logger.Error().Msg("Error while trying to instantiate remote peer " + remotePeerID)
         return fmt.Errorf(data.Err)
     }
     remotePeer := data.ToPeer()

-    data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER),"",localPeerID,nil,nil).LoadOne(localPeerID)
-    if data.Code != 200 {
-        logger.Error().Msg("Error while trying to instantiate local peer " + remotePeerID)
-        return fmt.Errorf(data.Err)
-    }
-    localPeer := data.ToPeer()
+    data = oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", localPeerID, nil, nil).LoadOne(localPeerID)
+    if data.Code != 200 {
+        logger.Error().Msg("Error while trying to instantiate local peer " + remotePeerID)
+        return fmt.Errorf(data.Err)
+    }
+    localPeer := data.ToPeer()

-    caller := tools.NewHTTPCaller(
-        map[tools.DataType]map[tools.METHOD]string{
-            tools.ADMIRALTY_SOURCE: {
-                tools.POST :"/:id",
-            },
-            tools.ADMIRALTY_KUBECONFIG: {
-                tools.GET:"/:id",
-            },
-            tools.ADMIRALTY_SECRET: {
-                tools.POST:"/:id/" + remotePeerID,
-            },
-            tools.ADMIRALTY_TARGET: {
-                tools.POST:"/:id/" + remotePeerID,
-            },
-            tools.ADMIRALTY_NODES: {
-                tools.GET:"/:id/" + remotePeerID,
-            },
-        },
-    )
+    caller := tools.NewHTTPCaller(
+        map[tools.DataType]map[tools.METHOD]string{
+            tools.ADMIRALTY_SOURCE: {
+                tools.POST: "/:id",
+            },
+            tools.ADMIRALTY_KUBECONFIG: {
+                tools.GET: "/:id",
+            },
+            tools.ADMIRALTY_SECRET: {
+                tools.POST: "/:id/" + remotePeerID,
+            },
+            tools.ADMIRALTY_TARGET: {
+                tools.POST: "/:id/" + remotePeerID,
+            },
+            tools.ADMIRALTY_NODES: {
+                tools.GET: "/:id/" + remotePeerID,
+            },
+        },
+    )

     logger.Info().Msg("\n\n Creating the Admiralty Source on " + remotePeerID + " ns-" + s.Id)
-    s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict},caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true)
+    s.callRemoteExecution(remotePeer, []int{http.StatusCreated, http.StatusConflict}, caller, s.Id, tools.ADMIRALTY_SOURCE, tools.POST, nil, true)
     logger.Info().Msg("\n\n Retrieving kubeconfig with the secret on " + remotePeerID + " ns-" + s.Id)
     kubeconfig := s.getKubeconfig(remotePeer, caller)
     logger.Info().Msg("\n\n Creating a secret from the kubeconfig " + localPeerID + " ns-" + s.Id)
-    s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller,s.Id, tools.ADMIRALTY_SECRET, tools.POST,kubeconfig, true)
-    logger.Info().Msg("\n\n Creating the Admiralty Target on " + localPeerID + " in namespace " + s.Id )
-    s.callRemoteExecution(localPeer,[]int{http.StatusCreated, http.StatusConflict},caller,s.Id,tools.ADMIRALTY_TARGET,tools.POST, nil, true)
+    s.callRemoteExecution(localPeer, []int{http.StatusCreated}, caller, s.Id, tools.ADMIRALTY_SECRET, tools.POST, kubeconfig, true)
+    logger.Info().Msg("\n\n Creating the Admiralty Target on " + localPeerID + " in namespace " + s.Id)
+    s.callRemoteExecution(localPeer, []int{http.StatusCreated, http.StatusConflict}, caller, s.Id, tools.ADMIRALTY_TARGET, tools.POST, nil, true)
     logger.Info().Msg("\n\n Checking for the creation of the admiralty node on " + localPeerID + " ns-" + s.Id)
-    s.checkNodeStatus(localPeer,caller)
+    s.checkNodeStatus(localPeer, caller)

     return nil
 }
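As a side note on the route table built above: each DataType/METHOD pair maps to a URL pattern in which `:id` stands in for the execution ID later passed to callRemoteExecution. The standalone sketch below illustrates that substitution in plain Go; the types, the resolveRoute helper, and the example values are invented for illustration and are not oc-lib's actual implementation.

package main

import (
    "fmt"
    "strings"
)

// Hypothetical stand-ins for oc-lib's tools.DataType and tools.METHOD.
type dataType string
type method string

const (
    admiraltySource dataType = "admiralty_source"
    post            method   = "POST"
)

// resolveRoute substitutes the ":id" placeholder, the way a caller built
// from a map[DataType]map[METHOD]string route table could resolve a URL.
func resolveRoute(routes map[dataType]map[method]string, dt dataType, m method, id string) (string, error) {
    byMethod, ok := routes[dt]
    if !ok {
        return "", fmt.Errorf("no routes for data type %q", dt)
    }
    pattern, ok := byMethod[m]
    if !ok {
        return "", fmt.Errorf("no %s route for data type %q", m, dt)
    }
    return strings.ReplaceAll(pattern, ":id", id), nil
}

func main() {
    routes := map[dataType]map[method]string{
        admiraltySource: {post: "/:id"},
    }
    url, err := resolveRoute(routes, admiraltySource, post, "exec-1234")
    if err != nil {
        panic(err)
    }
    fmt.Println(url) // prints: /exec-1234
}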
@@ -90,11 +89,11 @@ func (s *AdmiraltySetter) getKubeconfig(peer *peer.Peer, caller *tools.HTTPCalle
     return kubedata
 }

-func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int,caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) {
+func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int, caller *tools.HTTPCaller, dataID string, dt tools.DataType, method tools.METHOD, body interface{}, panicCode bool) {
     l := utils.GetLogger()
     _, err := peer.LaunchPeerExecution(peer.UUID, dataID, dt, method, body, caller)
     if err != nil {
-        l.Error().Msg("Error when executing on peer at" + peer.Url)
+        l.Error().Msg("Error when executing on peer at" + peer.APIUrl)
         l.Error().Msg(err.Error())
         panic(0)
     }
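The next hunk header shows this function continuing past the error branch, where it presumably validates the response against its expectedCode parameter; the InitializeAdmiralty calls above pass lists such as {201, 409} for exactly that. A standalone sketch of that kind of membership check, assuming nothing about oc-lib beyond the []int signature; containsCode is a hypothetical helper, not project code.

package main

import (
    "fmt"
    "net/http"
)

// containsCode reports whether got is one of the accepted status codes,
// the kind of check an expectedCode []int parameter suggests.
func containsCode(expected []int, got int) bool {
    for _, c := range expected {
        if c == got {
            return true
        }
    }
    return false
}

func main() {
    // Tolerate 201 Created and 409 Conflict, as the Admiralty source and
    // target calls in InitializeAdmiralty do.
    expected := []int{http.StatusCreated, http.StatusConflict}
    fmt.Println(containsCode(expected, http.StatusConflict))            // true
    fmt.Println(containsCode(expected, http.StatusInternalServerError)) // false
}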
@@ -111,7 +110,7 @@ func (*AdmiraltySetter) callRemoteExecution(peer *peer.Peer, expectedCode []int,

 }

-func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){
+func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller) {
     var data map[string]interface{}
     if resp, ok := caller.LastResults["body"]; ok {
         json.Unmarshal(resp.([]byte), &data)
@@ -128,10 +127,10 @@ func (s *AdmiraltySetter) storeNodeName(caller *tools.HTTPCaller){
     }
 }

-func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller){
-    for i := range(5) {
+func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HTTPCaller) {
+    for i := range 5 {
         time.Sleep(10 * time.Second) // let some time for kube to generate the node
-        s.callRemoteExecution(localPeer,[]int{http.StatusOK},caller,s.Id,tools.ADMIRALTY_NODES,tools.GET, nil, false)
+        s.callRemoteExecution(localPeer, []int{http.StatusOK}, caller, s.Id, tools.ADMIRALTY_NODES, tools.GET, nil, false)
         if caller.LastResults["code"] == 200 {
             s.storeNodeName(caller)
             return
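checkNodeStatus polls for the Admiralty node up to five times, sleeping ten seconds before each attempt. The standalone sketch below reproduces that bounded-poll shape with the ADMIRALTY_NODES GET stubbed out as a plain closure; pollUntilReady and everything in it are illustrative, not project code.

package main

import (
    "fmt"
    "time"
)

// pollUntilReady retries a probe a fixed number of times with a fixed pause,
// the same bounded-poll shape checkNodeStatus uses (5 attempts, 10s apart).
func pollUntilReady(attempts int, pause time.Duration, probe func() bool) bool {
    for i := 0; i < attempts; i++ {
        time.Sleep(pause) // give kube some time to create the node
        if probe() {
            return true
        }
        fmt.Println("Could not verify that node is up. Retrying...")
    }
    return false
}

func main() {
    tries := 0
    // Pause shortened to 10ms here so the example runs quickly; the real
    // code sleeps 10 * time.Second.
    ok := pollUntilReady(5, 10*time.Millisecond, func() bool {
        tries++
        return tries == 3 // pretend the node shows up on the third poll
    })
    fmt.Println("node ready:", ok)
}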
@@ -142,5 +141,5 @@ func (s *AdmiraltySetter) checkNodeStatus(localPeer *peer.Peer, caller *tools.HT
         }
         logger.Info().Msg("Could not verify that node is up. Retrying...")
     }

 }
@@ -17,6 +17,7 @@ import (
     oclib "cloud.o-forge.io/core/oc-lib"
     "cloud.o-forge.io/core/oc-lib/logs"
     "cloud.o-forge.io/core/oc-lib/models/common/enum"
+    "cloud.o-forge.io/core/oc-lib/models/peer"
     "cloud.o-forge.io/core/oc-lib/models/resources"
     "cloud.o-forge.io/core/oc-lib/models/resources/native_tools"
     w "cloud.o-forge.io/core/oc-lib/models/workflow"
@@ -194,30 +195,30 @@ func (b *ArgoBuilder) createArgoTemplates(
     exec *workflow_execution.WorkflowExecution,
     namespace string,
     id string,
-    processing resources.ResourceInterface,
+    obj resources.ResourceInterface,
     volumes []VolumeMount,
     firstItems []string,
     lastItems []string) ([]VolumeMount, []string, []string) {
-    _, firstItems, lastItems = b.addTaskToArgo(exec, b.Workflow.getDag(), id, processing, firstItems, lastItems)
-    template := &Template{Name: getArgoName(processing.GetName(), id)}
+    _, firstItems, lastItems = b.addTaskToArgo(exec, b.Workflow.getDag(), id, obj, firstItems, lastItems)
+    template := &Template{Name: getArgoName(obj.GetName(), id)}
     logger.Info().Msg(fmt.Sprint("Creating template for", template.Name))
-    isReparted, peerId := b.isProcessingReparted(processing, id)
-    if processing.GetType() == tools.PROCESSING_RESOURCE.String() {
-        template.CreateContainer(exec, processing.(*resources.ProcessingResource), b.Workflow.getDag())
-    } else if processing.GetType() == tools.NATIVE_TOOL.String() {
-        template.CreateEventContainer(exec, processing.(*resources.NativeTool), b.Workflow.getDag())
+    isReparted, peer := b.isReparted(obj, id)
+    if obj.GetType() == tools.PROCESSING_RESOURCE.String() {
+        template.CreateContainer(exec, obj.(*resources.ProcessingResource), b.Workflow.getDag())
+    } else if obj.GetType() == tools.NATIVE_TOOL.String() {
+        template.CreateEventContainer(exec, obj.(*resources.NativeTool), b.Workflow.getDag())
     }

     if isReparted {
-        logger.Debug().Msg("Reparted processing, on " + peerId)
-        b.RemotePeers = append(b.RemotePeers, peerId)
-        template.AddAdmiraltyAnnotations(peerId)
+        logger.Debug().Msg("Reparted processing, on " + peer.GetID())
+        b.RemotePeers = append(b.RemotePeers, peer.GetID())
+        template.AddAdmiraltyAnnotations(peer.GetID())
     }
     // get datacenter from the processing
-    if processing.GetType() == tools.PROCESSING_RESOURCE.String() && processing.(*resources.ProcessingResource).IsService {
-        b.CreateService(exec, id, processing)
+    if obj.GetType() == tools.PROCESSING_RESOURCE.String() && obj.(*resources.ProcessingResource).IsService {
+        b.CreateService(exec, id, obj)
         template.Metadata.Labels = make(map[string]string)
-        template.Metadata.Labels["app"] = "oc-service-" + processing.GetName() // Construct the template for the k8s service and add a link in graph between k8s service and processing
+        template.Metadata.Labels["app"] = "oc-service-" + obj.GetName() // Construct the template for the k8s service and add a link in graph between k8s service and processing
     }

     volumes = b.addStorageAnnotations(exec, id, template, namespace, volumes)
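With the rename to obj, this function dispatches on the resource's declared type and then type-asserts to the concrete struct before building either a plain container (PROCESSING_RESOURCE) or an event container (NATIVE_TOOL). A minimal self-contained sketch of that dispatch pattern; every type and name here is invented for illustration, not oc-lib code.

package main

import "fmt"

// Invented stand-ins for resources.ResourceInterface and its implementations.
type resource interface {
    GetType() string
    GetName() string
}

type processingResource struct{ name string }

func (p *processingResource) GetType() string { return "processing_resource" }
func (p *processingResource) GetName() string { return p.name }

type nativeTool struct{ name string }

func (n *nativeTool) GetType() string { return "native_tool" }
func (n *nativeTool) GetName() string { return n.name }

// buildTemplate mirrors the createArgoTemplates dispatch: check GetType(),
// then type-assert to the concrete resource before building the container.
func buildTemplate(obj resource) string {
    switch obj.GetType() {
    case "processing_resource":
        p := obj.(*processingResource)
        return "container for " + p.GetName()
    case "native_tool":
        n := obj.(*nativeTool)
        return "event container for " + n.GetName()
    }
    return "unsupported resource"
}

func main() {
    fmt.Println(buildTemplate(&processingResource{name: "train-model"}))
    fmt.Println(buildTemplate(&nativeTool{name: "event-source"}))
}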
@@ -470,7 +471,7 @@ func getArgoName(raw_name string, component_id string) (formatedName string) {

 // Verify if a processing resource is attached to another Compute than the one hosting
 // the current Open Cloud instance. If true return the peer ID to contact
-func (b *ArgoBuilder) isProcessingReparted(processing resources.ResourceInterface, graphID string) (bool, string) {
+func (b *ArgoBuilder) isReparted(processing resources.ResourceInterface, graphID string) (bool, *peer.Peer) {
     computeAttached := b.retrieveProcessingCompute(graphID)
     if computeAttached == nil {
         logger.Error().Msg("No compute was found attached to processing " + processing.GetName() + " : " + processing.GetID())
@@ -481,22 +482,22 @@ func (b *ArgoBuilder) isProcessingReparted(processing resources.ResourceInterfac
     req := oclib.NewRequest(oclib.LibDataEnum(oclib.PEER), "", "", nil, nil)
     if req == nil {
         fmt.Println("TODO : handle error when trying to create a request on the Peer Collection")
-        return false, ""
+        return false, nil
     }

     res := req.LoadOne(computeAttached.CreatorID)
     if res.Err != "" {
         fmt.Print("TODO : handle error when requesting PeerID")
         fmt.Print(res.Err)
-        return false, ""
+        return false, nil
     }

-    peer := *res.ToPeer()
+    peer := res.ToPeer()

     isNotReparted := peer.State == 1
     logger.Info().Msg(fmt.Sprint("Result IsMySelf for ", peer.UUID, " : ", isNotReparted))

-    return !isNotReparted, peer.UUID
+    return !isNotReparted, peer
 }

 func (b *ArgoBuilder) retrieveProcessingCompute(graphID string) *resources.ComputeResource {
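The new signature returns the full *peer.Peer instead of its UUID string, so callers such as createArgoTemplates can read peer.GetID() (or any other field) without a second lookup; the cost is that the pointer is nil on the early-return paths, so it must only be dereferenced when the boolean is true. A self-contained sketch of that contract, with a made-up Peer type standing in for models/peer.Peer.

package main

import "fmt"

// A made-up stand-in for models/peer.Peer.
type Peer struct {
    UUID  string
    State int
}

func (p *Peer) GetID() string { return p.UUID }

// lookup mimics isReparted's contract: on error paths it returns
// (false, nil); otherwise (reparted, peer). Callers must not touch the
// pointer unless the boolean is true.
func lookup(found bool) (bool, *Peer) {
    if !found {
        return false, nil // mirrors the "return false, nil" error paths
    }
    p := &Peer{UUID: "peer-42", State: 0}
    isNotReparted := p.State == 1
    return !isNotReparted, p
}

func main() {
    if reparted, p := lookup(true); reparted {
        fmt.Println("schedule on remote peer", p.GetID()) // safe: reparted implies p != nil
    }
    if reparted, p := lookup(false); !reparted && p == nil {
        fmt.Println("local execution, no peer record")
    }
}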