package node

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"oc-discovery/conf"
	"oc-discovery/daemons/node/common"
	"oc-discovery/daemons/node/indexer"
	"oc-discovery/daemons/node/pubsub"
	"oc-discovery/daemons/node/stream"
	"sync"
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/google/uuid"
	"github.com/libp2p/go-libp2p"
	pubsubs "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/network"
	pp "github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
	"github.com/libp2p/go-libp2p/p2p/security/noise"
)

// activeSearch tracks an in-flight distributed peer search for one user.
type activeSearch struct {
	queryID string
	cancel  context.CancelFunc
}

type Node struct {
	*common.LongLivedStreamRecordedService[interface{}] // TODO: change the stream's type parameter
	PS             *pubsubs.PubSub
	IndexerService *indexer.IndexerService
	PubSubService  *pubsub.PubSubService
	StreamService  *stream.StreamService
	PeerID         pp.ID
	isIndexer      bool
	peerRecord     *indexer.PeerRecord
	// activeSearches: one streaming search per user; a new search cancels the previous one.
	activeSearchesMu sync.Mutex
	activeSearches   map[string]*activeSearch
	Mu               sync.RWMutex
}

func InitNode(isNode bool, isIndexer bool) (*Node, error) {
	if !isNode && !isIndexer {
		return nil, errors.New("wait... what? your node needs to be at least a node or an indexer; we can't be friends otherwise")
	}
	logger := oclib.GetLogger()

	logger.Info().Msg("retrieving private key...")
	priv, err := tools.LoadKeyFromFilePrivate() // this node's private key
	if err != nil {
		return nil, err
	}

	logger.Info().Msg("retrieving psk file...")
	psk, err := common.LoadPSKFromFile() // pre-shared key of the private network; the public OC PSK designates the public network
	if err != nil {
		return nil, err
	}

	logger.Info().Msg("open a host...")
	gater := newOCConnectionGater(nil) // the host is wired back in right after creation
	h, err := libp2p.New(
		libp2p.PrivateNetwork(psk),
		libp2p.Identity(priv),
		libp2p.Security(noise.ID, noise.New),
		libp2p.ListenAddrStrings(
			fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", conf.GetConfig().NodeEndpointPort),
		),
		libp2p.ConnectionGater(gater),
	)
	if err != nil {
		return nil, fmt.Errorf("no host, no node: %w", err)
	}
	gater.host = h // wire the host back into the gater now that it exists
	logger.Info().Msg("Host open on " + h.ID().String())

	node := &Node{
		PeerID:                         h.ID(),
		isIndexer:                      isIndexer,
		LongLivedStreamRecordedService: common.NewStreamRecordedService[interface{}](h, 1000),
		activeSearches:                 map[string]*activeSearch{},
	}

	// Register the bandwidth probe handler so any peer measuring this node's
	// throughput can open a dedicated probe stream and read the echo.
	h.SetStreamHandler(common.ProtocolBandwidthProbe, common.HandleBandwidthProbe)
	// Register the witness query handler so peers can ask this node's view of indexers.
	h.SetStreamHandler(common.ProtocolWitnessQuery, func(s network.Stream) {
		common.HandleWitnessQuery(h, s)
	})

	var ps *pubsubs.PubSub
	if isNode {
		logger.Info().Msg("generate opencloud node...")
		ps, err = pubsubs.NewGossipSub(context.Background(), node.Host)
		if err != nil {
			panic(err) // the node cannot run without the pubsub that propagates node state
		}
		node.PS = ps

		// buildRecord returns a fresh signed PeerRecord as JSON, embedded in each
		// heartbeat so the receiving indexer can republish it to the DHT directly.
		// peerRecord is nil until claimInfo runs, so the first ~20s of heartbeats carry
		// no record; that's fine, claimInfo publishes once synchronously at startup.
		buildRecord := func() json.RawMessage {
			if node.peerRecord == nil {
				return nil
			}
			priv, err := tools.LoadKeyFromFilePrivate()
			if err != nil {
				return nil
			}
			fresh := *node.peerRecord
			fresh.PeerRecordPayload.ExpiryDate = time.Now().UTC().Add(2 * time.Minute)
			payload, _ := json.Marshal(fresh.PeerRecordPayload)
			fresh.Signature, err = priv.Sign(payload)
			if err != nil {
				return nil
			}
			b, _ := json.Marshal(fresh)
			return json.RawMessage(b)
		}

		logger.Info().Msg("connect to indexers...")
		common.ConnectToIndexers(node.Host, conf.GetConfig().MinIndexer, conf.GetConfig().MaxIndexer, buildRecord)

		logger.Info().Msg("claim my node...")
		if _, err := node.claimInfo(conf.GetConfig().Name, conf.GetConfig().Hostname); err != nil {
			panic(err)
		}

		logger.Info().Msg("run garbage collector...")
		node.StartGC(30 * time.Second)

		if node.StreamService, err = stream.InitStream(context.Background(), node.Host, node.PeerID, 1000, node); err != nil {
			panic(err)
		}
		if node.PubSubService, err = pubsub.InitPubSub(context.Background(), node.Host, node.PS, node, node.StreamService); err != nil {
			panic(err)
		}

		f := func(ctx context.Context, evt common.Event, topic string) {
			m := map[string]interface{}{}
			err := json.Unmarshal(evt.Payload, &m)
			if err != nil || evt.From == node.PeerID.String() {
				return
			}
			if p, err := node.GetPeerRecord(ctx, evt.From, false); err == nil && len(p) > 0 && m["search"] != nil {
				node.StreamService.SendResponse(p[0], &evt, fmt.Sprintf("%v", m["search"]))
			}
		}
		logger.Info().Msg("subscribe to decentralized search flow...")
		node.SubscribeToSearch(node.PS, &f)

		logger.Info().Msg("connect to NATS")
		go ListenNATS(node)
		logger.Info().Msg("Node is actually running.")
	}

	if isIndexer {
		logger.Info().Msg("generate opencloud indexer...")
		node.IndexerService = indexer.NewIndexerService(node.Host, ps, 500)
	}
	return node, nil
}

func (d *Node) Close() {
	if d.isIndexer && d.IndexerService != nil {
		d.IndexerService.Close()
	}
	// PubSubService and StreamService are only set when running as a plain node.
	if d.PubSubService != nil {
		d.PubSubService.Close()
	}
	if d.StreamService != nil {
		d.StreamService.Close()
	}
	d.Host.Close()
}

func (d *Node) publishPeerRecord(
	rec *indexer.PeerRecord,
) error {
	priv, err := tools.LoadKeyFromFilePrivate() // this node's private key
	if err != nil {
		return err
	}
	for _, ad := range common.Indexers.GetAddrs() {
		var err error
		if common.Indexers.Streams, err = common.TempStream(d.Host, *ad.Info, common.ProtocolPublish, "",
			common.Indexers.Streams, map[protocol.ID]*common.ProtocolInfo{}, &common.Indexers.MuStream); err != nil {
			continue
		}
		stream := common.Indexers.Streams.GetPerID(common.ProtocolPublish, ad.Info.ID)
		base := indexer.PeerRecordPayload{
			Name:       rec.Name,
			DID:        rec.DID,
			PubKey:     rec.PubKey,
			ExpiryDate: time.Now().UTC().Add(2 * time.Minute),
		}
		payload, _ := json.Marshal(base)
		rec.PeerRecordPayload = base
		if rec.Signature, err = priv.Sign(payload); err != nil {
			return err
		}
		if err := json.NewEncoder(stream.Stream).Encode(&rec); err != nil { // then publish on the stream
			return err
		}
	}
	return nil
}
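// Illustrative receiver-side counterpart of publishPeerRecord: a minimal sketch
// of how an indexer handler could decode, verify and expiry-check a published
// record. This is an assumption for documentation purposes only, not the actual
// ProtocolPublish handler (that lives in the indexer package).
func exampleVerifyPublishedRecord(s network.Stream) (*indexer.PeerRecord, error) {
	var rec indexer.PeerRecord
	if err := json.NewDecoder(s).Decode(&rec); err != nil {
		return nil, err
	}
	// Verify checks the signature over the marshalled PeerRecordPayload.
	if _, err := rec.Verify(); err != nil {
		return nil, err
	}
	// Records carry a short expiry (2 minutes above); drop anything stale.
	if time.Now().UTC().After(rec.PeerRecordPayload.ExpiryDate) {
		return nil, errors.New("peer record expired")
	}
	return &rec, nil
}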
// SearchPeerRecord starts a distributed peer search via ProtocolSearchPeer.
// userKey identifies the requesting user: a new call cancels any previous
// search for the same user. Results are pushed to onResult as they arrive.
// The function returns when the search stream closes (idle timeout or indexer unreachable).
func (d *Node) SearchPeerRecord(userKey, needle string, onResult func(common.SearchHit)) {
	logger := oclib.GetLogger()
	ctx, cancel := context.WithCancel(context.Background())

	d.activeSearchesMu.Lock()
	if prev, ok := d.activeSearches[userKey]; ok {
		prev.cancel()
	}
	queryID := uuid.New().String()
	d.activeSearches[userKey] = &activeSearch{queryID: queryID, cancel: cancel}
	d.activeSearchesMu.Unlock()

	defer func() {
		cancel()
		d.activeSearchesMu.Lock()
		if cur, ok := d.activeSearches[userKey]; ok && cur.queryID == queryID {
			delete(d.activeSearches, userKey)
		}
		d.activeSearchesMu.Unlock()
	}()

	req := common.SearchPeerRequest{QueryID: queryID}
	if pid, err := pp.Decode(needle); err == nil {
		req.PeerID = pid.String()
	} else if _, err := uuid.Parse(needle); err == nil {
		req.DID = needle
	} else {
		req.Name = needle
	}

	// Try indexers in pool order until one accepts the stream.
	for _, ad := range common.Indexers.GetAddrs() {
		if ad.Info == nil {
			continue
		}
		dialCtx, dialCancel := context.WithTimeout(ctx, 5*time.Second)
		s, err := d.Host.NewStream(dialCtx, ad.Info.ID, common.ProtocolSearchPeer)
		dialCancel()
		if err != nil {
			continue
		}
		if err := json.NewEncoder(s).Encode(req); err != nil {
			s.Reset()
			continue
		}
		dec := json.NewDecoder(s)
		for {
			var result common.SearchPeerResult
			if err := dec.Decode(&result); err != nil {
				break
			}
			if result.QueryID != queryID {
				continue // stale response from a previous query
			}
			for _, hit := range result.Records {
				onResult(hit)
			}
		}
		s.Reset()
		return
	}
	logger.Warn().Str("user", userKey).Msg("[search] no reachable indexer for peer search")
}
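// Usage sketch for SearchPeerRecord (illustrative only, not wired anywhere in
// this file): the needle can be a libp2p PeerID, a UUID DID or a plain name;
// classification happens inside SearchPeerRecord and the caller simply consumes
// the streamed hits. Printing the hit with %+v assumes common.SearchHit is a plain struct.
func exampleSearchUsage(n *Node) {
	n.SearchPeerRecord("user-42", "alice-node", func(hit common.SearchHit) {
		oclib.GetLogger().Info().Msg(fmt.Sprintf("[search] hit: %+v", hit))
	})
}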
func (d *Node) GetPeerRecord(
	ctx context.Context,
	pidOrdid string,
	search bool,
) ([]*peer.Peer, error) {
	var err error
	var info map[string]indexer.PeerRecord

	// Build the GetValue request: if pidOrdid is neither a UUID DID nor a libp2p
	// PeerID, treat it as a human-readable name and let the indexer resolve it.
	getReq := indexer.GetValue{Key: pidOrdid}
	if pidR, pidErr := pp.Decode(pidOrdid); pidErr == nil {
		getReq.PeerID = pidR.String()
	} else if _, uuidErr := uuid.Parse(pidOrdid); uuidErr != nil {
		// Not a UUID DID → treat pidOrdid as a name substring search.
		getReq.Name = pidOrdid
		getReq.Key = ""
	}
	getReq.Search = search

	for _, ad := range common.Indexers.GetAddrs() {
		if common.Indexers.Streams, err = common.TempStream(d.Host, *ad.Info, common.ProtocolGet, "",
			common.Indexers.Streams, map[protocol.ID]*common.ProtocolInfo{}, &common.Indexers.MuStream); err != nil {
			continue
		}
		stream := common.Indexers.Streams.GetPerID(common.ProtocolGet, ad.Info.ID)
		if err := json.NewEncoder(stream.Stream).Encode(getReq); err != nil {
			continue
		}
		var resp indexer.GetResponse
		if err := json.NewDecoder(stream.Stream).Decode(&resp); err != nil {
			continue
		}
		if resp.Found {
			info = resp.Records
		}
		break
	}

	var ps []*peer.Peer
	for _, pr := range info {
		if pk, err := pr.Verify(); err != nil {
			return nil, err
		} else if _, p, err := pr.ExtractPeer(d.PeerID.String(), pr.PeerID, pk); err != nil {
			return nil, err
		} else {
			ps = append(ps, p)
		}
	}
	return ps, err
}

func (d *Node) claimInfo(
	name string,
	endPoint string, // TODO: the endpoint should not be necessary, StreamAddress ought to be enough
) (*peer.Peer, error) {
	if endPoint == "" {
		return nil, errors.New("no endpoint found for peer")
	}
	did := uuid.New().String()
	peers := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).Search(&dbs.Filters{
		And: map[string][]dbs.Filter{ // look the peer up by its libp2p peer_id
			"peer_id": {{Operator: dbs.EQUAL.String(), Value: d.Host.ID().String()}},
		},
	}, "", false)
	if len(peers.Data) > 0 {
		did = peers.Data[0].GetID() // the peer already exists: reuse its DID
	}

	priv, err := tools.LoadKeyFromFilePrivate()
	if err != nil {
		return nil, err
	}
	pub, err := tools.LoadKeyFromFilePublic()
	if err != nil {
		return nil, err
	}
	pubBytes, err := crypto.MarshalPublicKey(pub)
	if err != nil {
		return nil, err
	}

	now := time.Now().UTC()
	expiry := now.Add(150 * time.Second)
	pRec := indexer.PeerRecordPayload{
		Name:       name,
		DID:        did, // the real peer DID
		PubKey:     pubBytes,
		ExpiryDate: expiry,
	}
	d.PeerID = d.Host.ID()
	payload, _ := json.Marshal(pRec)
	rec := &indexer.PeerRecord{
		PeerRecordPayload: pRec,
	}
	rec.Signature, err = priv.Sign(payload)
	if err != nil {
		return nil, err
	}
	rec.PeerID = d.Host.ID().String()
	rec.APIUrl = endPoint
	rec.StreamAddress = "/ip4/" + conf.GetConfig().Hostname + "/tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + "/p2p/" + rec.PeerID
	rec.NATSAddress = oclib.GetConfig().NATSUrl
	rec.WalletAddress = "my-wallet"

	if err := d.publishPeerRecord(rec); err != nil {
		return nil, err
	}
	d.peerRecord = rec

	if _, err := rec.Verify(); err != nil {
		return nil, err
	} else {
		_, p, err := rec.ExtractPeer(did, did, pub)
		if err != nil {
			return p, err
		}
		b, err := json.Marshal(p)
		if err != nil {
			return p, err
		}
		go tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
			FromApp:    "oc-discovery",
			Datatype:   tools.PEER,
			Method:     int(tools.CREATE_RESOURCE),
			SearchAttr: "peer_id",
			Payload:    b,
		})
		return p, err
	}
}

/*
TODO:
- Booking is a new decentralized flow: we check, wait for a response, validate; it goes
  through discovery and gets relayed.
- The shared workspace is a decentralization matter: movements are communicated to the
  shared peers.
- A share replaces the notion of partnership at the partnershipping level
  -> when a workspace is shared we become temporary partners, whether we originally were or not.
  -> we then get the same privileges.
- Admiralty orchestrations work the same way. An event then triggers the creation of a service key.
We must be able to CRUD a DBObject with signature verification.
*/
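// Caller sketch (illustrative only): resolving a peer by libp2p PeerID, UUID DID
// or name goes through GetPeerRecord, which classifies the key and queries the
// reachable indexers. The helper name and its error message are assumptions.
func exampleResolvePeer(n *Node, pidDidOrName string) (*peer.Peer, error) {
	recs, err := n.GetPeerRecord(context.Background(), pidDidOrName, false)
	if err != nil {
		return nil, err
	}
	if len(recs) == 0 {
		return nil, errors.New("peer not found on any reachable indexer")
	}
	return recs[0], nil
}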