22 Commits

Author SHA1 Message Date
mr
780a0c530d Change 2026-03-11 19:29:39 +01:00
mr
d0af40f4c7 Simple Architecture 2026-03-11 16:28:15 +01:00
mr
83cef6e6f6 saved 2026-03-09 14:57:41 +01:00
mr
3751ec554d Full Flow : Catalog + Peer 2026-03-05 15:22:02 +01:00
mr
ef3d998ead demo test + Peer 2026-03-03 16:38:24 +01:00
mr
79aa3cc2b3 adjust 2026-02-26 09:14:34 +01:00
mr
779e36aaef Pass + Doc 2026-02-24 14:31:37 +01:00
mr
572da29fd4 check up if peer is sourced. 2026-02-20 15:01:01 +01:00
mr
3eae5791a1 Native Indexer Mode 2026-02-20 12:42:18 +01:00
mr
88fd05066c update lightest peer and nats behaviors 2026-02-18 14:32:44 +01:00
mr
0250c3b339 Peer Discovery -> DHT // no more pubsub state 2026-02-18 13:29:50 +01:00
mr
6a5ffb9a92 Indexer Quality Score TrustLess 2026-02-17 13:11:22 +01:00
mr
fa914958b6 Keep Peer Caching + Resource Verification. 2026-02-09 13:28:00 +01:00
mr
1c0b2b4312 better tagging 2026-02-09 09:45:41 +01:00
mr
631e2846fe remove apk 2026-02-09 08:55:50 +01:00
mr
d985d8339a Change of state Conn Management 2026-02-05 16:17:33 +01:00
mr
ea14ad3933 Closure On change of state 2026-02-05 16:17:14 +01:00
mr
2e31df89c2 oc-discovery + auto create peer 2026-02-05 15:47:29 +01:00
mr
425cbdfe7d stream address 2026-02-05 15:36:22 +01:00
mr
8ee5b84e21 publish-registry 2026-02-05 12:14:02 +01:00
mr
552bb17e2b Connectivity ok 2026-02-05 11:23:11 +01:00
mr
88e29073a2 dockerfile default 2026-02-05 09:31:51 +01:00
71 changed files with 7657 additions and 1427 deletions

View File

@@ -21,11 +21,6 @@ RUN go mod download
FROM golang:alpine AS builder
ARG CONF_NUM
# Fail fast if CONF_NUM missing
RUN test -n "$CONF_NUM"
RUN apk add --no-cache git
WORKDIR /oc-discovery
# Reuse Go cache
@@ -55,13 +50,13 @@ WORKDIR /app
RUN mkdir ./pem
COPY --from=builder /app/extracted/pem/private${CONF_NUM}.pem ./pem/private.pem
COPY --from=builder /app/extracted/pem/private${CONF_NUM:-1}.pem ./pem/private.pem
COPY --from=builder /app/extracted/psk ./psk
COPY --from=builder /app/extracted/pem/public${CONF_NUM}.pem ./pem/public.pem
COPY --from=builder /app/extracted/pem/public${CONF_NUM:-1}.pem ./pem/public.pem
COPY --from=builder /app/extracted/oc-discovery /usr/bin/oc-discovery
COPY --from=builder /app/extracted/docker_discovery${CONF_NUM}.json /etc/oc/discovery.json
COPY --from=builder /app/extracted/docker_discovery${CONF_NUM:-1}.json /etc/oc/discovery.json
EXPOSE 400${CONF_NUM}
EXPOSE 400${CONF_NUM:-1}
ENTRYPOINT ["oc-discovery"]

View File

@@ -10,15 +10,17 @@ clean:
rm -rf oc-discovery
docker:
DOCKER_BUILDKIT=1 docker build -t oc/oc-discovery:0.0.1 -f Dockerfile .
docker tag oc/oc-discovery:0.0.1 oc/oc-discovery:latest
DOCKER_BUILDKIT=1 docker build -t oc-discovery -f Dockerfile .
docker tag oc-discovery opencloudregistry/oc-discovery:latest
publish-kind:
kind load docker-image oc/oc-discovery:0.0.1 --name opencloud
kind load docker-image opencloudregistry/oc-discovery:latest --name opencloud
publish-registry:
@echo "TODO"
docker push opencloudregistry/oc-discovery:latest
all: docker publish-kind publish-registry
all: docker publish-kind
ci: docker publish-registry
.PHONY: build run clean docker publish-kind publish-registry

View File

@@ -14,3 +14,34 @@ If default Swagger page is displayed instead of your api, change url in swagger
url: "swagger.json"
sequenceDiagram
autonumber
participant Dev as Developer / Owner
participant IPFS as IPFS network
participant CID as CID (file hash)
participant Argo as Argo orchestrator
participant CU as Compute Unit
participant MinIO as MinIO storage
%% 1. Add the file to IPFS
Dev->>IPFS: Encrypts and adds file (algo/dataset)
IPFS-->>CID: Generates unique CID (file hash)
Dev->>Dev: Stores CID for future reference
%% 2. Orchestration by Argo
Argo->>CID: Requests CID for the job
CID-->>Argo: Provides the file (verified via hash)
%% 3. Execution on the Compute Unit
Argo->>CU: Deploys job with the retrieved file
CU->>CU: Verifies hash (CID) for integrity
CU->>CU: Runs the algo on the dataset
%% 4. Result storage
CU->>MinIO: Stores output (results) or logs
CU->>IPFS: Optional: adds output to IPFS (new CID)
%% 5. Verification and traceability
Dev->>IPFS: Verifies output CID if needed
CU->>Dev: Provides result and hash log

View File

@@ -14,6 +14,22 @@ type Config struct {
PeerIDS string // TO REMOVE
NodeMode string
MinIndexer int
MaxIndexer int
// SearchTimeout is the max duration without a new result before the
// distributed peer search stream is closed. Default: 5s.
SearchTimeout int // seconds; 0 → use default (5)
// Indexer connection burst guard: max new connections accepted within the window.
// 0 → use defaults (20 new peers per 30s).
MaxConnPerWindow int // default 20
ConnWindowSecs int // default 30
// Per-node behavioral limits (sliding 60s window). 0 → use built-in defaults.
MaxHBPerMinute int // default 5
MaxPublishPerMinute int // default 10
MaxGetPerMinute int // default 50
}
var instance *Config
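// Illustrative sketch (not part of this change): the zero values documented
// above could be resolved to their defaults with a helper like this one.
// applyDefaults is a hypothetical name, not present in the diff.
func applyDefaults(c *Config) {
if c.SearchTimeout == 0 {
c.SearchTimeout = 5 // seconds
}
if c.MaxConnPerWindow == 0 {
c.MaxConnPerWindow = 20
}
if c.ConnWindowSecs == 0 {
c.ConnWindowSecs = 30
}
if c.MaxHBPerMinute == 0 {
c.MaxHBPerMinute = 5
}
if c.MaxPublishPerMinute == 0 {
c.MaxPublishPerMinute = 10
}
if c.MaxGetPerMinute == 0 {
c.MaxGetPerMinute = 50
}
}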

View File

@@ -0,0 +1,294 @@
package common
import (
"errors"
"sync"
"time"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
type Score struct {
FirstContacted time.Time
UptimeTracker *UptimeTracker
LastFillRate float64
Score float64
// IsSeed marks indexers that came from the IndexerAddresses static config.
// Seeds are sticky: they are never evicted by the score threshold alone.
// A seed is only removed when: (a) heartbeat fails, or (b) it sends
// SuggestMigrate and the node already has MinIndexer non-seed alternatives.
IsSeed bool
// challenge bookkeeping (2-3 peers per batch, raw data returned by indexer)
hbCount int // heartbeats sent since last challenge batch
nextChallenge int // send challenges when hbCount reaches this (rand 1-10)
challengeTotal int // number of own-PeerID challenges sent (ground truth)
challengeCorrect int // own PeerID found AND lastSeen within 2×interval
// fill rate consistency: cross-check reported fillRate vs peerCount/maxNodes
fillChecked int
fillConsistent int
// BornAt stability
LastBornAt time.Time
bornAtChanges int
// DHT challenge
dhtChecked int
dhtSuccess int
dhtBatchCounter int
// Peer witnesses
witnessChecked int
witnessConsistent int
}
// computeNodeSideScore computes the node's quality assessment of an indexer from raw metrics.
// All ratios are in [0,1]; result is in [0,100].
// - uptimeRatio : gap-aware fraction of lifetime the indexer was reachable
// - challengeAccuracy: own-PeerID challenges answered correctly (found + recent lastSeen)
// - latencyScore : 1 - RTT/maxRTT, clamped [0,1]
// - fillScore : 1 - fillRate — prefer less-loaded indexers
// - fillConsistency : fraction of ticks where peerCount/maxNodes ≈ fillRate (±10%)
func (s *Score) ComputeNodeSideScore(latencyScore float64) float64 {
uptime := s.UptimeTracker.UptimeRatio()
challengeAccuracy := 1.0
if s.challengeTotal > 0 {
challengeAccuracy = float64(s.challengeCorrect) / float64(s.challengeTotal)
}
fillScore := 1.0 - s.LastFillRate
fillConsistency := 1.0
if s.fillChecked > 0 {
fillConsistency = float64(s.fillConsistent) / float64(s.fillChecked)
}
witnessConsistency := 1.0
if s.witnessChecked > 0 {
witnessConsistency = float64(s.witnessConsistent) / float64(s.witnessChecked)
}
dhtSuccessRate := 1.0
if s.dhtChecked > 0 {
dhtSuccessRate = float64(s.dhtSuccess) / float64(s.dhtChecked)
}
base := ((0.20 * uptime) +
(0.20 * challengeAccuracy) +
(0.15 * latencyScore) +
(0.10 * fillScore) +
(0.10 * fillConsistency) +
(0.15 * witnessConsistency) +
(0.10 * dhtSuccessRate)) * 100
// BornAt stability: each unexpected BornAt change penalises by 30%.
bornAtPenalty := 1.0 - 0.30*float64(s.bornAtChanges)
if bornAtPenalty < 0 {
bornAtPenalty = 0
}
return base * bornAtPenalty
}
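// Illustrative numbers (not from the diff): with uptime 0.9, challengeAccuracy 1.0,
// latencyScore 0.8, fillScore 0.6, fillConsistency 1.0, witnessConsistency 1.0 and
// dhtSuccessRate 1.0, the base is
//   (0.20*0.9 + 0.20*1.0 + 0.15*0.8 + 0.10*0.6 + 0.10*1.0 + 0.15*1.0 + 0.10*1.0) * 100 = 91.0,
// and a single unexpected BornAt change would scale it down to 91.0 * 0.70 = 63.7.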
type Directory struct {
MuAddr sync.RWMutex
MuScore sync.RWMutex
MuStream sync.RWMutex
Addrs map[string]*pp.AddrInfo
Scores map[string]*Score
Nudge chan struct{}
Streams ProtocolStream
}
func (d *Directory) ExistsScore(a string) bool {
d.MuScore.RLock()
defer d.MuScore.RUnlock()
for addr, ai := range d.Scores {
if ai != nil && (a == addr) {
return true
}
}
return false
}
func (d *Directory) GetScore(a string) *Score {
d.MuScore.RLock()
defer d.MuScore.RUnlock()
for addr, s := range d.Scores {
if s != nil && (a == addr) {
sCopy := *s
return &sCopy
}
}
return nil
}
func (d *Directory) GetScores() map[string]*Score {
d.MuScore.RLock()
defer d.MuScore.RUnlock()
score := map[string]*Score{}
for addr, s := range d.Scores {
score[addr] = s
}
return score
}
func (d *Directory) DeleteScore(a string) {
d.MuScore.Lock() // full write lock: this method rebuilds and replaces d.Scores
defer d.MuScore.Unlock()
score := map[string]*Score{}
for addr, s := range d.Scores {
if a != addr {
score[addr] = s
}
}
d.Scores = score
}
func (d *Directory) SetScore(addr string, score *Score) *pp.AddrInfo {
d.MuScore.Lock()
defer d.MuScore.Unlock()
d.Scores[addr] = score
return nil
}
func (d *Directory) ExistsAddr(addrOrId string) bool {
d.MuAddr.RLock()
defer d.MuAddr.RUnlock()
for addr, ai := range d.Addrs {
if ai != nil && (addrOrId == ai.ID.String() || addrOrId == addr) {
return true
}
}
return false
}
func (d *Directory) GetAddr(addrOrId string) *pp.AddrInfo {
d.MuAddr.RLock()
defer d.MuAddr.RUnlock()
for addr, ai := range d.Addrs {
if ai != nil && (addrOrId == ai.ID.String() || addrOrId == addr) {
aiCopy := *ai
return &aiCopy
}
}
return nil
}
func (d *Directory) DeleteAddr(a string) {
d.MuAddr.Lock() // full write lock: this method rebuilds and replaces d.Addrs
defer d.MuAddr.Unlock()
addrs := map[string]*pp.AddrInfo{}
for addr, s := range d.Addrs {
if a != addr {
addrs[addr] = s
}
}
d.Addrs = addrs
}
func (d *Directory) SetAddr(addr string, info *pp.AddrInfo) *pp.AddrInfo {
d.MuAddr.Lock()
defer d.MuAddr.Unlock()
d.Addrs[addr] = info
return nil
}
func (d *Directory) GetAddrIDs() []pp.ID {
d.MuAddr.RLock()
defer d.MuAddr.RUnlock()
indexers := make([]pp.ID, 0, len(d.Addrs))
for _, ai := range d.Addrs {
if ai != nil {
indexers = append(indexers, ai.ID)
}
}
return Shuffle(indexers)
}
func (d *Directory) GetAddrsStr() []string {
d.MuAddr.RLock()
defer d.MuAddr.RUnlock()
indexers := make([]string, 0, len(d.Addrs))
for s, ai := range d.Addrs {
if ai != nil {
indexers = append(indexers, s)
}
}
return Shuffle(indexers)
}
type Entry struct {
Addr string
Info *pp.AddrInfo
}
func (d *Directory) GetAddrs() []Entry {
d.MuAddr.RLock()
defer d.MuAddr.RUnlock()
indexers := make([]Entry, 0, len(d.Addrs))
for addr, ai := range d.Addrs {
if ai != nil {
indexers = append(indexers, Entry{
Addr: addr,
Info: ai,
})
}
}
return Shuffle(indexers)
}
// NudgeIndexerHeartbeat signals the indexer heartbeat goroutine to fire immediately.
func (d *Directory) NudgeIt() {
select {
case d.Nudge <- struct{}{}:
default: // nudge already pending, skip
}
}
type ProtocolStream map[protocol.ID]map[pp.ID]*Stream
func (ps ProtocolStream) Get(protocol protocol.ID) map[pp.ID]*Stream {
if ps[protocol] == nil {
ps[protocol] = map[pp.ID]*Stream{}
}
return ps[protocol]
}
func (ps ProtocolStream) GetPerID(protocol protocol.ID, peerID pp.ID) *Stream {
if ps[protocol] == nil {
ps[protocol] = map[pp.ID]*Stream{}
}
return ps[protocol][peerID]
}
func (ps ProtocolStream) Add(protocol protocol.ID, peerID *pp.ID, s *Stream) error {
if ps[protocol] == nil {
ps[protocol] = map[pp.ID]*Stream{}
}
if peerID != nil {
if s != nil {
ps[protocol][*peerID] = s
} else {
return errors.New("unable to add stream : stream missing")
}
}
return nil
}
func (ps ProtocolStream) Delete(protocol protocol.ID, peerID *pp.ID) {
if streams, ok := ps[protocol]; ok {
if peerID != nil {
if s := streams[*peerID]; s != nil && s.Stream != nil {
s.Stream.Close()
}
delete(streams, *peerID)
return
}
// No peerID given: close every stream of this protocol and drop the entry.
for _, v := range streams {
if v.Stream != nil {
v.Stream.Close()
}
}
delete(ps, protocol)
}
}
var Indexers = &Directory{
Addrs: map[string]*pp.AddrInfo{},
Scores: map[string]*Score{},
Nudge: make(chan struct{}, 1),
Streams: ProtocolStream{},
}

View File

@@ -0,0 +1,255 @@
package common
import (
"context"
"encoding/json"
"io"
"time"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
oclib "cloud.o-forge.io/core/oc-lib"
)
type Heartbeat struct {
Name string `json:"name"`
Stream *Stream `json:"stream"`
DID string `json:"did"`
PeerID string `json:"peer_id"`
Timestamp int64 `json:"timestamp"`
IndexersBinded []string `json:"indexers_binded"`
Score float64
// Record carries a fresh signed PeerRecord (JSON) so the receiving indexer
// can republish it to the DHT without an extra round-trip.
// Only set by nodes (not indexers heartbeating other indexers).
Record json.RawMessage `json:"record,omitempty"`
// Need is how many more indexers this node wants (MaxIndexer - current pool size).
// The receiving indexer uses this to know how many suggestions to return.
// 0 means the pool is full — no suggestions needed unless SuggestMigrate.
Need int `json:"need,omitempty"`
// Challenges is a list of PeerIDs the node asks the indexer to spot-check.
// Always includes the node's own PeerID (ground truth) + up to 2 additional
// known peers. Nil means no challenge this tick.
Challenges []string `json:"challenges,omitempty"`
// ChallengeDID asks the indexer to retrieve this DID from the DHT (every 5th batch).
ChallengeDID string `json:"challenge_did,omitempty"`
// Referent marks this indexer as the node's designated search referent.
// Only one indexer per node receives Referent=true at a time (the best-scored one).
// The indexer stores the node in its referencedNodes for distributed search.
Referent bool `json:"referent,omitempty"`
}
// SearchPeerRequest is sent by a node to an indexer via ProtocolSearchPeer.
// The indexer broadcasts it on the GossipSub search mesh and streams results back.
type SearchPeerRequest struct {
QueryID string `json:"query_id"`
// At least one of PeerID, DID, Name must be set.
PeerID string `json:"peer_id,omitempty"`
DID string `json:"did,omitempty"`
Name string `json:"name,omitempty"`
}
// SearchQuery is broadcast on TopicSearchPeer by the receiving indexer.
// EmitterID is the indexer's own PeerID — responding indexers open a
// ProtocolSearchPeerResponse stream back to it.
type SearchQuery struct {
QueryID string `json:"query_id"`
PeerID string `json:"peer_id,omitempty"`
DID string `json:"did,omitempty"`
Name string `json:"name,omitempty"`
EmitterID string `json:"emitter_id"`
}
// SearchPeerResult is sent by a responding indexer to the emitting indexer
// via ProtocolSearchPeerResponse, and forwarded by the emitting indexer to
// the node on the open ProtocolSearchPeer stream.
type SearchPeerResult struct {
QueryID string `json:"query_id"`
Records []SearchHit `json:"records"`
}
// SearchHit is a single peer found during distributed search.
type SearchHit struct {
PeerID string `json:"peer_id"`
DID string `json:"did"`
Name string `json:"name"`
}
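// Hypothetical node-side usage of the search types above (not part of this
// change): open a ProtocolSearchPeer stream to one indexer, send the request,
// then read SearchPeerResult objects until the indexer closes the stream.
func searchPeerByName(ctx context.Context, h host.Host, indexer pp.ID, name string) ([]SearchHit, error) {
s, err := h.NewStream(ctx, indexer, ProtocolSearchPeer)
if err != nil {
return nil, err
}
defer s.Close()
if err := json.NewEncoder(s).Encode(SearchPeerRequest{QueryID: "q-1", Name: name}); err != nil {
return nil, err
}
var hits []SearchHit
dec := json.NewDecoder(s)
for {
var res SearchPeerResult
if err := dec.Decode(&res); err != nil {
break // stream is closed by the indexer once SearchTimeout elapses with no new results
}
hits = append(hits, res.Records...)
}
return hits, nil
}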
// ChallengeEntry is the indexer's raw answer for one challenged peer.
type ChallengeEntry struct {
PeerID string `json:"peer_id"`
Found bool `json:"found"`
LastSeen time.Time `json:"last_seen,omitempty"` // zero if not found
}
// HeartbeatResponse carries raw metrics only — no pre-cooked score.
type HeartbeatResponse struct {
FillRate float64 `json:"fill_rate"`
PeerCount int `json:"peer_count"`
MaxNodes int `json:"max_nodes"` // capacity — lets node cross-check fillRate
BornAt time.Time `json:"born_at"`
Challenges []ChallengeEntry `json:"challenges,omitempty"`
// DHTFound / DHTPayload: response to a ChallengeDID request.
DHTFound bool `json:"dht_found,omitempty"`
DHTPayload json.RawMessage `json:"dht_payload,omitempty"`
// Witnesses: random sample of connected nodes so the querying node can cross-check.
Witnesses []pp.AddrInfo `json:"witnesses,omitempty"`
// Suggestions: better indexers this indexer knows about via its DHT cache.
// The node should open heartbeat connections to these (they become StaticIndexers).
Suggestions []pp.AddrInfo `json:"suggestions,omitempty"`
// SuggestMigrate: set when this indexer is overloaded (fill rate > threshold)
// and is actively trying to hand the node off to the Suggestions list.
// Seeds: node de-stickies this indexer once it has MinIndexer non-seed alternatives.
// Non-seeds: node removes this indexer immediately if it has enough alternatives.
SuggestMigrate bool `json:"suggest_migrate,omitempty"`
}
// ComputeIndexerScore computes a composite quality score [0, 100] for the connecting peer.
// - uptimeRatio: fraction of tracked lifetime online (gap-aware) — peer reliability
// - bpms: bandwidth normalized to MaxExpectedMbps — link capacity
// - diversity: indexer's own /24 subnet diversity — network topology quality
// - latencyScore: 1 - RTT/maxRoundTrip — link responsiveness
// - fillRate: fraction of indexer slots used (0=empty, 1=full) — collective trust signal:
// a fuller indexer has been chosen and retained by many peers, which is evidence of quality.
func (hb *Heartbeat) ComputeIndexerScore(uptimeRatio float64, bpms float64, diversity float64, latencyScore float64, fillRate float64) {
hb.Score = ((0.20 * uptimeRatio) +
(0.20 * bpms) +
(0.20 * diversity) +
(0.15 * latencyScore) +
(0.25 * fillRate)) * 100
}
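// Illustrative numbers (not from the diff): uptimeRatio 0.8, bpms 0.5,
// diversity 0.6, latencyScore 0.9, fillRate 0.4 gives
//   (0.20*0.8 + 0.20*0.5 + 0.20*0.6 + 0.15*0.9 + 0.25*0.4) * 100 = 61.5,
// which passes dynamicMinScore for a peer a few hours old (e.g. 30.0 at 4h)
// but not the hardened 80.0 threshold reached after 24h.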
type HeartbeatInfo []struct {
Info []byte `json:"info"`
}
// WitnessRequest is sent by a node to a peer to ask its view of a given indexer.
type WitnessRequest struct {
IndexerPeerID string `json:"indexer_peer_id"`
}
// WitnessReport is returned by a peer in response to a WitnessRequest.
type WitnessReport struct {
Seen bool `json:"seen"`
BornAt time.Time `json:"born_at,omitempty"`
FillRate float64 `json:"fill_rate,omitempty"`
Score float64 `json:"score,omitempty"`
}
// HandleBandwidthProbe echoes back everything written on the stream, then closes.
// It is registered by all participants so the measuring side (the heartbeat receiver)
// can open a dedicated probe stream and read the round-trip latency + throughput.
func HandleBandwidthProbe(s network.Stream) {
defer s.Close()
s.SetDeadline(time.Now().Add(10 * time.Second))
io.Copy(s, s) // echo every byte back to the sender
}
// HandleWitnessQuery answers a witness query: the caller wants to know
// what this node thinks of a given indexer (identified by its PeerID).
func HandleWitnessQuery(h host.Host, s network.Stream) {
defer s.Close()
s.SetDeadline(time.Now().Add(5 * time.Second))
var req WitnessRequest
if err := json.NewDecoder(s).Decode(&req); err != nil {
return
}
report := WitnessReport{}
for _, ai := range Indexers.GetAddrs() {
if ai.Info == nil || ai.Info.ID.String() != req.IndexerPeerID {
continue
}
if score := Indexers.GetScore(addrKey(*ai.Info)); score != nil {
report.Seen = true
report.BornAt = score.LastBornAt
report.FillRate = score.LastFillRate
report.Score = score.Score
}
break
}
json.NewEncoder(s).Encode(report)
}
// SupportsHeartbeat probes pid with a short-lived stream to verify it has
// a ProtocolHeartbeat handler (i.e. it is an indexer, not a plain node).
// Only protocol negotiation is performed — no data is sent.
// Returns false on any error, including "protocol not supported".
func SupportsHeartbeat(h host.Host, pid pp.ID) bool {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
s, err := h.NewStream(ctx, pid, ProtocolHeartbeat)
if err != nil {
return false
}
s.Reset()
return true
}
// queryWitnesses contacts each witness in parallel, collects their view of the
// indexer, and updates score.witnessChecked / score.witnessConsistent.
// Called in a goroutine — must not hold any lock.
func queryWitnesses(h host.Host, indexerPeerID string, indexerBornAt time.Time, indexerFillRate float64, witnesses []pp.AddrInfo, score *Score) {
logger := oclib.GetLogger()
type result struct{ consistent bool }
results := make(chan result, len(witnesses))
for _, ai := range witnesses {
if ai.ID == h.ID() {
// Never query ourselves — skip and count as inconclusive.
results <- result{}
continue
}
go func(ai pp.AddrInfo) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
s, err := h.NewStream(ctx, ai.ID, ProtocolWitnessQuery)
if err != nil {
results <- result{}
return
}
defer s.Close()
s.SetDeadline(time.Now().Add(5 * time.Second))
if err := json.NewEncoder(s).Encode(WitnessRequest{IndexerPeerID: indexerPeerID}); err != nil {
results <- result{}
return
}
var rep WitnessReport
if err := json.NewDecoder(s).Decode(&rep); err != nil || !rep.Seen {
results <- result{}
return
}
// BornAt must be identical (fixed timestamp).
bornAtOK := !rep.BornAt.IsZero() && rep.BornAt.Equal(indexerBornAt)
// FillRate coherent within ±25% (it fluctuates normally).
diff := rep.FillRate - indexerFillRate
if diff < 0 {
diff = -diff
}
fillOK := diff < 0.25
consistent := bornAtOK && fillOK
logger.Debug().
Str("witness", ai.ID.String()).
Bool("bornAt_ok", bornAtOK).
Bool("fill_ok", fillOK).
Msg("witness report")
results <- result{consistent: consistent}
}(ai)
}
checked, consistent := 0, 0
for range witnesses {
r := <-results
checked++
if r.consistent {
consistent++
}
}
if checked == 0 {
return
}
score.witnessChecked += checked
score.witnessConsistent += consistent
}

View File

@@ -0,0 +1,589 @@
package common
import (
"context"
"encoding/json"
"fmt"
"math/rand"
"strings"
"sync/atomic"
"time"
"oc-discovery/conf"
oclib "cloud.o-forge.io/core/oc-lib"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
var TimeWatcher time.Time
// retryRunning guards against launching multiple retryUntilSeedResponds goroutines.
var retryRunning atomic.Bool
func ConnectToIndexers(h host.Host, minIndexer int, maxIndexer int, recordFn ...func() json.RawMessage) error {
TimeWatcher = time.Now().UTC()
logger := oclib.GetLogger()
// Bootstrap from IndexerAddresses seed set.
addresses := strings.Split(conf.GetConfig().IndexerAddresses, ",")
if len(addresses) > maxIndexer {
addresses = addresses[0:maxIndexer]
}
for _, indexerAddr := range addresses {
indexerAddr = strings.TrimSpace(indexerAddr)
if indexerAddr == "" {
continue
}
ad, err := pp.AddrInfoFromString(indexerAddr)
if err != nil {
logger.Err(err)
continue
}
key := ad.ID.String()
Indexers.SetAddr(key, ad)
// Pre-create score entry with IsSeed=true so the sticky flag is set before
// the first heartbeat tick (lazy creation in doTick would lose the flag).
if !Indexers.ExistsScore(key) {
Indexers.SetScore(key, &Score{
FirstContacted: time.Now().UTC(),
UptimeTracker: &UptimeTracker{FirstSeen: time.Now().UTC()},
nextChallenge: rand.Intn(10) + 1,
IsSeed: true,
})
}
}
seeds := Indexers.GetAddrs()
indexerCount := len(seeds)
if indexerCount < minIndexer {
return fmt.Errorf("you run a node without indexers... your gonna be isolated.")
}
// Start long-lived heartbeat to seed indexers. The single goroutine follows
// all subsequent StaticIndexers changes.
SendHeartbeat(context.Background(), ProtocolHeartbeat, conf.GetConfig().Name,
h, Indexers, 20*time.Second, maxIndexer, recordFn...)
// Watch for inbound connections: if a peer connects to us and our pool has
// room, probe it first to confirm it supports ProtocolHeartbeat (i.e. it is
// an indexer). Plain nodes don't register the handler — the negotiation fails
// instantly so we never pollute the pool with non-indexer peers.
h.Network().Notify(&network.NotifyBundle{
ConnectedF: func(n network.Network, c network.Conn) {
if c.Stat().Direction != network.DirInbound {
return
}
if len(Indexers.GetAddrs()) >= maxIndexer {
return
}
peerID := c.RemotePeer()
if Indexers.ExistsAddr(peerID.String()) {
return
}
// Probe in a goroutine — ConnectedF must not block.
go func(pid pp.ID) {
if !SupportsHeartbeat(h, pid) {
return // plain node, skip
}
if len(Indexers.GetAddrs()) >= maxIndexer {
return
}
if Indexers.ExistsAddr(pid.String()) {
return
}
addrs := h.Peerstore().Addrs(pid)
if len(addrs) == 0 {
return
}
ai := FilterLoopbackAddrs(pp.AddrInfo{ID: pid, Addrs: addrs})
if len(ai.Addrs) == 0 {
return
}
adCopy := ai
Indexers.SetAddr(pid.String(), &adCopy)
Indexers.NudgeIt()
log := oclib.GetLogger()
log.Info().Str("peer", pid.String()).
Msg("[pool] inbound indexer peer added as candidate")
}(peerID)
},
})
// Proactive DHT upgrade: once seeds are connected and the DHT routing table
// is warm, discover better indexers and add them to the pool alongside the seeds.
// Seeds stay as guaranteed anchors; scoring will demote poor performers over time.
go func(seeds []Entry) {
// Let seed connections establish and the DHT routing table warm up.
time.Sleep(5 * time.Second)
// For pure nodes (no IndexerService), spin up a lightweight DHT client.
if discoveryDHT == nil {
if len(seeds) == 0 {
return
}
initNodeDHT(h, seeds)
}
if discoveryDHT == nil {
return
}
current := len(Indexers.GetAddrs())
need := maxIndexer - current
if need <= 0 {
need = maxIndexer / 2 // diversify even when pool is already at capacity
}
logger.Info().Int("need", need).Msg("[dht] proactive indexer discovery from DHT")
replenishIndexersFromDHT(h, need)
}(seeds)
return nil
}
// reconnectToSeeds re-adds the configured seed indexers to StaticIndexers as
// sticky fallback entries. Called when the pool drops to zero so the node
// never becomes completely isolated.
func reconnectToSeeds() {
logger := oclib.GetLogger()
logger.Warn().Msg("[pool] all indexers lost, reconnecting to configured seeds")
addresses := strings.Split(conf.GetConfig().IndexerAddresses, ",")
for _, addrStr := range addresses {
addrStr = strings.TrimSpace(addrStr)
if addrStr == "" {
continue
}
ad, err := pp.AddrInfoFromString(addrStr)
if err != nil {
continue
}
key := ad.ID.String()
Indexers.SetAddr(key, ad)
if score := Indexers.GetScore(key); score == nil {
Indexers.SetScore(key, &Score{
FirstContacted: time.Now().UTC(),
UptimeTracker: &UptimeTracker{FirstSeen: time.Now().UTC()},
nextChallenge: rand.Intn(10) + 1,
IsSeed: true,
})
} else {
// Restore sticky flag so the seed is not immediately re-ejected.
score.IsSeed = true
}
}
}
// retryUntilSeedResponds loops with exponential backoff until at least one
// configured seed is reachable again. Once seeds are back in the pool it
// nudges the heartbeat loop and lets the normal DHT upgrade path take over.
// Should be called in a goroutine — it blocks until the situation resolves.
// If no seeds are configured there is nothing to retry: it simply waits for an inbound indexer to refill the pool.
func retryUntilSeedResponds() {
if !retryRunning.CompareAndSwap(false, true) {
return // another goroutine is already running the retry loop
}
defer retryRunning.Store(false)
logger := oclib.GetLogger()
rawAddresses := strings.TrimSpace(conf.GetConfig().IndexerAddresses)
if rawAddresses == "" {
// No seeds configured: rely on the inbound-connection notifee to fill
// the pool. Just wait patiently — the loop below will return as soon
// as any peer connects and NudgeIt() is called.
logger.Warn().Msg("[pool] pool empty and no seeds configured — waiting for inbound indexer")
}
backoff := 10 * time.Second
const maxBackoff = 5 * time.Minute
for {
time.Sleep(backoff)
if backoff < maxBackoff {
backoff *= 2
}
// Check whether someone else already refilled the pool.
if len(Indexers.GetAddrs()) > 0 {
logger.Info().Msg("[pool] pool refilled externally, stopping seed retry")
return
}
logger.Warn().Dur("backoff", backoff).Msg("[pool] still isolated, retrying seeds")
reconnectToSeeds()
if len(Indexers.GetAddrs()) > 0 {
Indexers.NudgeIt()
// Re-bootstrap DHT now that we have at least one connection candidate.
if discoveryDHT != nil {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
discoveryDHT.Bootstrap(ctx) //nolint:errcheck
cancel()
}
return
}
}
}
// ensureScore returns the Score for addr, creating it if absent.
func ensureScore(d *Directory, addr string) *Score {
if !d.ExistsScore(addr) {
d.SetScore(addr, &Score{
FirstContacted: time.Now().UTC(),
UptimeTracker: &UptimeTracker{FirstSeen: time.Now().UTC()},
nextChallenge: rand.Intn(10) + 1,
})
}
return d.GetScore(addr)
}
// evictPeer removes addr from directory atomically and returns a snapshot of
// remaining AddrInfos (for consensus voter selection).
func evictPeer(d *Directory, addr string, id pp.ID, proto protocol.ID) []pp.AddrInfo {
d.Streams.Delete(proto, &id)
d.DeleteAddr(addr)
voters := make([]pp.AddrInfo, 0, len(d.Addrs))
for _, ai := range d.GetAddrs() {
if ai.Info != nil {
voters = append(voters, *ai.Info)
}
}
d.DeleteScore(addr)
return voters
}
// handleSuggestions adds unknown suggested indexers to the directory.
func handleSuggestions(d *Directory, from string, suggestions []pp.AddrInfo) {
added := 0
for _, sug := range suggestions {
key := addrKey(sug)
if !d.ExistsAddr(key) {
cpy := sug
d.SetAddr(key, &cpy)
added++
}
}
if added > 0 {
logger := oclib.GetLogger()
logger.Info().Int("added", added).Str("from", from).
Msg("added suggested indexers from heartbeat response")
d.NudgeIt()
}
}
// SendHeartbeat starts a goroutine that sends periodic heartbeats to peers.
// recordFn, when provided, is called on each tick and its output is embedded in
// the heartbeat as a fresh signed PeerRecord so the receiving indexer can
// republish it to the DHT without an extra round-trip.
// Pass no recordFn (or nil) for indexer→indexer / native heartbeats.
func SendHeartbeat(ctx context.Context, proto protocol.ID, name string, h host.Host, directory *Directory, interval time.Duration, maxPool int, recordFn ...func() json.RawMessage) {
logger := oclib.GetLogger()
isIndexerHB := directory == Indexers
var recFn func() json.RawMessage
if len(recordFn) > 0 {
recFn = recordFn[0]
}
go func() {
logger.Info().Str("proto", string(proto)).Int("peers", len(directory.Addrs)).Msg("heartbeat started")
t := time.NewTicker(interval)
defer t.Stop()
// peerEntry pairs addr key with AddrInfo so doTick can update score maps directly.
type peerEntry struct {
addr string
ai *pp.AddrInfo
}
doTick := func() {
addrs := directory.GetAddrsStr()
need := maxPool - len(addrs)
if need < 0 {
need = 0
}
baseHB := Heartbeat{
Name: name,
PeerID: h.ID().String(),
Timestamp: time.Now().UTC().Unix(),
IndexersBinded: addrs,
Need: need,
}
if recFn != nil {
baseHB.Record = recFn()
}
// Determine the referent indexer: highest-scored one receives Referent=true
// so it stores us in its referencedNodes for distributed search.
var referentAddr string
if isIndexerHB {
var bestScore float64 = -1
for _, ai2 := range directory.GetAddrs() {
if s := directory.GetScore(ai2.Addr); s != nil && s.Score > bestScore {
bestScore = s.Score
referentAddr = ai2.Addr
}
}
}
for _, ai := range directory.GetAddrs() {
// Build per-peer heartbeat copy so challenge injection is peer-specific.
hb := baseHB
if isIndexerHB && referentAddr != "" && ai.Addr == referentAddr {
hb.Referent = true
}
// Ensure an IndexerScore entry exists for this peer.
var score *Score
if isIndexerHB {
score = ensureScore(directory, ai.Addr)
// Inject challenge batch if due (random 1-10 HBs between batches).
score.hbCount++
if score.hbCount >= score.nextChallenge {
// Ground truth: node's own PeerID — indexer MUST have us.
challenges := []string{h.ID().String()}
// Add up to 2 more known peers (other indexers) for richer data.
// Use the already-snapshotted entries to avoid re-locking.
for _, ai2 := range directory.GetAddrs() {
if ai2.Addr != ai.Addr && ai2.Info != nil {
challenges = append(challenges, ai2.Info.ID.String())
if len(challenges) >= 3 {
break
}
}
}
hb.Challenges = challenges
score.hbCount = 0
score.nextChallenge = rand.Intn(10) + 1
score.challengeTotal++ // count own-PeerID challenge (ground truth)
score.dhtBatchCounter++
// DHT challenge every 5th batch: ask indexer to retrieve our own DID.
if score.dhtBatchCounter%5 == 0 {
var selfDID string
if len(baseHB.Record) > 0 {
var partial struct {
DID string `json:"did"`
}
if json.Unmarshal(baseHB.Record, &partial) == nil {
selfDID = partial.DID
}
}
if selfDID != "" {
hb.ChallengeDID = selfDID
}
}
}
}
resp, rtt, err := sendHeartbeat(ctx, h, proto, ai.Info, hb, directory.Streams, interval)
if err != nil { // Heartbeat fails
HeartbeatFailure(h, proto, directory, ai.Addr, ai.Info, isIndexerHB, maxPool, err)
continue
}
// Update IndexerScore — uptime recorded on any successful send,
// even if the indexer does not support bidirectional heartbeat (Fix 1).
if isIndexerHB && score != nil {
score.UptimeTracker.RecordHeartbeat()
score.UptimeTracker.ConsecutiveFails = 0 // reset on success
maxRTT := BaseRoundTrip * 10
latencyScore := 1.0 - float64(rtt)/float64(maxRTT)
if latencyScore < 0 {
latencyScore = 0
}
if latencyScore > 1 {
latencyScore = 1
}
// Update fill / challenge fields only when the indexer responded.
if resp != nil {
// BornAt stability check.
if score.LastBornAt.IsZero() {
score.LastBornAt = resp.BornAt
} else if !resp.BornAt.IsZero() && !resp.BornAt.Equal(score.LastBornAt) {
score.bornAtChanges++
score.LastBornAt = resp.BornAt
logger.Warn().Str("peer", ai.Info.ID.String()).
Int("changes", score.bornAtChanges).
Msg("indexer BornAt changed — possible restart or impersonation")
}
score.LastFillRate = resp.FillRate
// Fill rate consistency: cross-check peerCount/maxNodes vs reported fillRate.
if resp.MaxNodes > 0 {
expected := float64(resp.PeerCount) / float64(resp.MaxNodes)
diff := expected - resp.FillRate
if diff < 0 {
diff = -diff
}
score.fillChecked++
if diff < 0.1 {
score.fillConsistent++
}
}
// Validate challenge responses. Only own-PeerID counts as ground truth.
if len(hb.Challenges) > 0 && len(resp.Challenges) > 0 {
ownID := h.ID().String()
for _, ce := range resp.Challenges {
if ce.PeerID != ownID {
continue // informational only
}
recentEnough := !ce.LastSeen.IsZero() &&
time.Since(ce.LastSeen) < 2*RecommendedHeartbeatInterval
if ce.Found && recentEnough {
score.challengeCorrect++
}
logger.Info().Str("peer", ai.Info.ID.String()).
Bool("found", ce.Found).
Bool("recent", recentEnough).
Msg("own-PeerID challenge result")
break
}
}
// DHT challenge result.
if hb.ChallengeDID != "" {
score.dhtChecked++
if resp.DHTFound {
score.dhtSuccess++
}
}
// Launch witness cross-check asynchronously (must not hold lock).
if len(resp.Witnesses) > 0 {
go queryWitnesses(h, ai.Info.ID.String(), resp.BornAt, resp.FillRate, resp.Witnesses, score)
} else if resp.MaxNodes > 0 {
// No witnesses offered. Valid if indexer only has us (PeerCount==1).
// Cross-check: FillRate should equal 1/MaxNodes within ±10%.
expected := 1.0 / float64(resp.MaxNodes)
diff := resp.FillRate - expected
if diff < 0 {
diff = -diff
}
score.witnessChecked++
if resp.PeerCount == 1 && diff < 0.1 {
score.witnessConsistent++
}
}
}
score.Score = score.ComputeNodeSideScore(latencyScore)
age := score.UptimeTracker.Uptime()
minScore := dynamicMinScore(age)
// Fix 4: grace period — at least 2 full heartbeat cycles before ejecting.
isSeed := score.IsSeed
// Seeds are sticky: never evicted by score alone (SuggestMigrate handles it).
// Never eject the last indexer by score alone — we would lose all connectivity.
belowThreshold := score.Score < minScore &&
score.UptimeTracker.TotalOnline >= 2*RecommendedHeartbeatInterval &&
!isSeed &&
len(directory.Addrs) > 1
if belowThreshold {
logger.Info().Str("peer", ai.Info.ID.String()).
Float64("score", score.Score).Float64("min", minScore).
Msg("indexer score below threshold, removing from pool")
voters := evictPeer(directory, ai.Addr, ai.Info.ID, proto)
need := max(maxPool-len(voters), 1)
if len(voters) > 0 {
go TriggerConsensus(h, voters, need)
} else {
go replenishIndexersFromDHT(h, need)
}
}
// Accept suggestions from this indexer — add unknown ones to the directory.
if resp != nil && len(resp.Suggestions) > 0 {
handleSuggestions(directory, ai.Info.ID.String(), resp.Suggestions)
}
// Handle SuggestMigrate: indexer is overloaded and wants us to move.
if resp != nil && resp.SuggestMigrate && isIndexerHB {
nonSeedCount := 0
for _, sc := range directory.GetScores() {
if !sc.IsSeed {
nonSeedCount++
}
}
if nonSeedCount >= conf.GetConfig().MinIndexer {
if isSeed {
// Seed has offloaded us: clear sticky flag, score eviction takes over.
score.IsSeed = false
logger.Info().Str("peer", ai.Info.ID.String()).
Msg("seed discharged via SuggestMigrate, de-stickied")
} else {
evictPeer(directory, ai.Addr, ai.Info.ID, proto)
logger.Info().Str("peer", ai.Info.ID.String()).Msg("accepted migration from overloaded indexer")
}
}
}
}
}
}
for {
select {
case <-t.C:
doTick()
case <-directory.Nudge:
if isIndexerHB {
logger.Info().Msg("nudge received, heartbeating new indexers immediately")
doTick()
}
case <-ctx.Done():
return
}
}
}()
}
func HeartbeatFailure(h host.Host, proto protocol.ID, directory *Directory,
addr string, info *pp.AddrInfo, isIndexerHB bool, maxPool int, err error) {
logger := oclib.GetLogger()
logger.Err(err)
// Seeds are never evicted on heartbeat failure.
// Keeping them in the pool lets the regular 60-second ticker retry them
// at a natural cadence — no reconnect storm, no libp2p dial-backoff accumulation.
// A seed will self-heal once it comes back; DHT and inbound peers fill the gap.
if isIndexerHB {
if score := directory.GetScore(addr); score != nil {
if score.IsSeed {
logger.Warn().Str("peer", info.ID.String()).
Msg("[pool] seed heartbeat failed — keeping in pool, ticker will retry " + err.Error())
return
}
// Indirect probing via other alive indexers:
// If other indexers in the pool are still responding, they act as implicit
// third-party witnesses confirming our connectivity is fine — the failed
// indexer is genuinely dead, evict immediately.
// If this is the last indexer, there is no third party. Retry up to 3 times
// (consecutive failures tracked in UptimeTracker) before declaring it dead.
if len(directory.GetAddrs()) <= 1 {
score.UptimeTracker.ConsecutiveFails++
if score.UptimeTracker.ConsecutiveFails < 3 {
logger.Warn().Str("peer", info.ID.String()).
Int("attempt", score.UptimeTracker.ConsecutiveFails).
Msg("[indirect] last indexer failed, retrying before eviction")
return
}
logger.Warn().Str("peer", info.ID.String()).
Msg("[indirect] last indexer failed 3 times consecutively, evicting")
}
}
}
logger.Info().Str("peer", info.ID.String()).Str("proto", string(proto)).
Msg("heartbeat failed, removing peer from pool : " + err.Error())
consensusVoters := evictPeer(directory, addr, info.ID, proto)
if isIndexerHB {
need := maxPool - len(consensusVoters)
if need < 1 {
need = 1
}
logger.Info().Int("remaining", len(consensusVoters)).Int("need", need).Msg("pool state after removal")
poolSize := len(directory.GetAddrs())
if poolSize == 0 {
// Pool is truly empty (no seeds configured or no seeds in pool).
// Start the backoff retry loop — it will re-add seeds and nudge
// only once a seed actually responds.
go retryUntilSeedResponds()
} else if len(consensusVoters) > 0 {
go TriggerConsensus(h, consensusVoters, need)
} else {
go replenishIndexersFromDHT(h, need)
}
}
}

View File

@@ -28,7 +28,7 @@ type Event struct {
}
func NewEvent(name string, from string, dt *tools.DataType, user string, payload []byte) *Event {
priv, err := LoadKeyFromFilePrivate() // your node private key
priv, err := tools.LoadKeyFromFilePrivate() // your node private key
if err != nil {
return nil
}
@@ -73,7 +73,11 @@ func (event *Event) Verify(p *peer.Peer) error {
if p.Relation == peer.BLACKLIST { // if peer is blacklisted... quit...
return errors.New("peer is blacklisted")
}
pubKey, err := PubKeyFromString(p.PublicKey) // extract pubkey from pubkey str
return event.VerifySignature(p.PublicKey)
}
func (event *Event) VerifySignature(pk string) error {
pubKey, err := PubKeyFromString(pk) // extract pubkey from pubkey str
if err != nil {
return errors.New("pubkey is malformed")
}
@@ -88,8 +92,8 @@ func (event *Event) Verify(p *peer.Peer) error {
}
type TopicNodeActivityPub struct {
NodeActivity peer.PeerState
Disposer pp.AddrInfo `json:"disposer_address"`
NodeActivity int `json:"node_activity"`
Disposer string `json:"disposer_address"`
Name string `json:"name"`
DID string `json:"did"` // real PEER ID
PeerID string `json:"peer_id"`
@@ -108,6 +112,12 @@ func NewLongLivedPubSubService(h host.Host) *LongLivedPubSubService {
}
}
func (s *LongLivedPubSubService) GetPubSub(topicName string) *pubsub.Topic {
s.PubsubMu.Lock()
defer s.PubsubMu.Unlock()
return s.LongLivedPubSubs[topicName]
}
func (s *LongLivedPubSubService) processEvent(
ctx context.Context,
p *peer.Peer,
@@ -119,26 +129,8 @@ func (s *LongLivedPubSubService) processEvent(
return handler(ctx, topicName, event)
}
const TopicPubSubNodeActivity = "oc-node-activity"
const TopicPubSubSearch = "oc-node-search"
func (s *LongLivedPubSubService) SubscribeToNodeActivity(ps *pubsub.PubSub, f *func(context.Context, TopicNodeActivityPub, string)) error {
ps.RegisterTopicValidator(TopicPubSubNodeActivity, func(ctx context.Context, p pp.ID, m *pubsub.Message) bool {
return true
})
if topic, err := ps.Join(TopicPubSubNodeActivity); err != nil {
return err
} else {
s.PubsubMu.Lock()
defer s.PubsubMu.Unlock()
s.LongLivedPubSubs[TopicPubSubNodeActivity] = topic
}
if f != nil {
return SubscribeEvents(s, context.Background(), TopicPubSubNodeActivity, -1, *f)
}
return nil
}
func (s *LongLivedPubSubService) SubscribeToSearch(ps *pubsub.PubSub, f *func(context.Context, Event, string)) error {
ps.RegisterTopicValidator(TopicPubSubSearch, func(ctx context.Context, p pp.ID, m *pubsub.Message) bool {
return true
@@ -147,8 +139,8 @@ func (s *LongLivedPubSubService) SubscribeToSearch(ps *pubsub.PubSub, f *func(co
return err
} else {
s.PubsubMu.Lock()
defer s.PubsubMu.Unlock()
s.LongLivedPubSubs[TopicPubSubSearch] = topic
s.PubsubMu.Unlock()
}
if f != nil {
return SubscribeEvents(s, context.Background(), TopicPubSubSearch, -1, *f)
@@ -159,20 +151,16 @@ func (s *LongLivedPubSubService) SubscribeToSearch(ps *pubsub.PubSub, f *func(co
func SubscribeEvents[T interface{}](s *LongLivedPubSubService,
ctx context.Context, proto string, timeout int, f func(context.Context, T, string),
) error {
s.PubsubMu.Lock()
if s.LongLivedPubSubs[proto] == nil {
s.PubsubMu.Unlock()
return errors.New("no protocol subscribed in pubsub")
}
topic := s.LongLivedPubSubs[proto]
s.PubsubMu.Unlock()
sub, err := topic.Subscribe() // then subscribe to it
if err != nil {
return err
}
// launch loop waiting for results.
go waitResults[T](s, ctx, sub, proto, timeout, f)
go waitResults(s, ctx, sub, proto, timeout, f)
return nil
}
@@ -182,6 +170,7 @@ func waitResults[T interface{}](s *LongLivedPubSubService, ctx context.Context,
for {
s.PubsubMu.Lock() // check safely if cache is actually notified subscribed to topic
if s.LongLivedPubSubs[proto] == nil { // if not kill the loop.
s.PubsubMu.Unlock()
break
}
s.PubsubMu.Unlock()
@@ -207,10 +196,5 @@ func waitResults[T interface{}](s *LongLivedPubSubService, ctx context.Context,
continue
}
f(ctx, evt, fmt.Sprintf("%v", proto))
/*if p, err := ps.Node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 {
if err := ps.processEvent(ctx, p[0], &evt, topicName); err != nil {
logger.Err(err)
}
}*/
}
}

View File

@@ -0,0 +1,183 @@
package common
import (
"context"
cr "crypto/rand"
"io"
"net"
"slices"
"time"
"github.com/libp2p/go-libp2p/core/host"
pp "github.com/libp2p/go-libp2p/core/peer"
)
const MaxExpectedMbps = 100.0
const MinPayloadChallenge = 512
const MaxPayloadChallenge = 2048
const BaseRoundTrip = 400 * time.Millisecond
type UptimeTracker struct {
FirstSeen time.Time
LastSeen time.Time
TotalOnline time.Duration
ConsecutiveFails int // incremented on each heartbeat failure; reset to 0 on success
}
// RecordHeartbeat accumulates online time gap-aware: only counts the interval if
// the gap since the last heartbeat is within 2× the recommended interval (i.e. no
// extended outage). Call this each time a heartbeat is successfully processed.
func (u *UptimeTracker) RecordHeartbeat() {
now := time.Now().UTC()
if !u.LastSeen.IsZero() {
gap := now.Sub(u.LastSeen)
if gap <= 2*RecommendedHeartbeatInterval {
u.TotalOnline += gap
}
}
u.LastSeen = now
}
func (u *UptimeTracker) Uptime() time.Duration {
return time.Since(u.FirstSeen)
}
// UptimeRatio returns the fraction of tracked lifetime during which the peer was
// continuously online (gap ≤ 2×RecommendedHeartbeatInterval). Returns 0 before
// the first heartbeat interval has elapsed.
func (u *UptimeTracker) UptimeRatio() float64 {
total := time.Since(u.FirstSeen)
if total <= 0 {
return 0
}
ratio := float64(u.TotalOnline) / float64(total)
if ratio > 1 {
ratio = 1
}
return ratio
}
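// Illustrative example (not from the diff): assuming a 20s recommended interval,
// one 5-minute outage during a 1-hour lifetime is not credited (its gap exceeds
// 2*RecommendedHeartbeatInterval), so TotalOnline is roughly 55min and
// UptimeRatio is about 55/60 ≈ 0.92.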
func (u *UptimeTracker) IsEligible(min time.Duration) bool {
return u.Uptime() >= min
}
// getBandwidthChallengeRate opens a dedicated ProtocolBandwidthProbe stream to
// remotePeer, sends a random payload, reads the echo, and computes throughput
// and a latency score. Returns (ok, bpms, latencyScore, error).
// latencyScore is 1.0 when RTT is very fast and 0.0 when at or beyond maxRoundTrip.
// Using a separate stream avoids mixing binary data on the JSON heartbeat stream
// and ensures the echo handler is actually running on the remote side.
func getBandwidthChallengeRate(h host.Host, remotePeer pp.ID, payloadSize int) (bool, float64, float64, error) {
payload := make([]byte, payloadSize)
if _, err := cr.Read(payload); err != nil {
return false, 0, 0, err
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
s, err := h.NewStream(ctx, remotePeer, ProtocolBandwidthProbe)
if err != nil {
return false, 0, 0, err
}
defer s.Reset()
s.SetDeadline(time.Now().Add(10 * time.Second))
start := time.Now()
if _, err = s.Write(payload); err != nil {
return false, 0, 0, err
}
// Half-close the write side so the handler's io.Copy sees EOF and stops.
s.CloseWrite()
// Read the echo.
response := make([]byte, payloadSize)
if _, err = io.ReadFull(s, response); err != nil {
return false, 0, 0, err
}
duration := time.Since(start)
maxRoundTrip := BaseRoundTrip + (time.Duration(payloadSize) * (100 * time.Millisecond))
mbps := float64(payloadSize*8) / duration.Seconds() / 1e6
// latencyScore: 1.0 = instant, 0.0 = at maxRoundTrip or beyond.
latencyScore := 1.0 - float64(duration)/float64(maxRoundTrip)
if latencyScore < 0 {
latencyScore = 0
}
if latencyScore > 1 {
latencyScore = 1
}
if duration > maxRoundTrip || mbps < 5.0 {
return false, float64(mbps / MaxExpectedMbps), latencyScore, nil
}
return true, float64(mbps / MaxExpectedMbps), latencyScore, nil
}
func getDiversityRate(h host.Host, peers []string) float64 {
peers, _ = checkPeers(h, peers)
diverse := []string{}
for _, p := range peers {
ip, err := ExtractIP(p)
if err != nil {
continue
}
div := ip.Mask(net.CIDRMask(24, 32)).String()
if !slices.Contains(diverse, div) {
diverse = append(diverse, div)
}
}
if len(diverse) == 0 || len(peers) == 0 {
return 1
}
return float64(len(diverse)) / float64(len(peers))
}
// getOwnDiversityRate measures subnet /24 diversity of the indexer's own connected peers.
// This evaluates the indexer's network position rather than the connecting node's topology.
func getOwnDiversityRate(h host.Host) float64 {
diverse := map[string]struct{}{}
total := 0
for _, pid := range h.Network().Peers() {
for _, maddr := range h.Peerstore().Addrs(pid) {
total++
ip, err := ExtractIP(maddr.String())
if err != nil {
continue
}
diverse[ip.Mask(net.CIDRMask(24, 32)).String()] = struct{}{}
}
}
if total == 0 {
return 1
}
return float64(len(diverse)) / float64(total)
}
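// Illustrative example (not from the diff): 10 peer addresses spread across
// 4 distinct /24 subnets yield a diversity of 4/10 = 0.4, while 10 peers in a
// single subnet yield 1/10 = 0.1.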
func checkPeers(h host.Host, peers []string) ([]string, []string) {
concretePeer := []string{}
ips := []string{}
for _, p := range peers {
ad, err := pp.AddrInfoFromString(p)
if err != nil {
continue
}
if PeerIsAlive(h, *ad) {
concretePeer = append(concretePeer, p)
if ip, err := ExtractIP(p); err == nil {
ips = append(ips, ip.Mask(net.CIDRMask(24, 32)).String())
}
}
}
return concretePeer, ips
}
// dynamicMinScore returns the minimum acceptable score for a peer, starting
// permissive (20%) for brand-new peers and hardening linearly to 80% over 24h.
// This prevents ejecting newcomers in fresh networks while filtering parasites.
func dynamicMinScore(age time.Duration) float64 {
hours := age.Hours()
score := 20.0 + 60.0*(hours/24.0)
if score > 80.0 {
score = 80.0
}
return score
}
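// Illustrative values (not from the diff): dynamicMinScore is 20.0 at age 0,
// 50.0 at 12h, and is capped at 80.0 from 24h onward.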

View File

@@ -0,0 +1,302 @@
package common
import (
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"strings"
"sync"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
type LongLivedStreamRecordedService[T interface{}] struct {
*LongLivedPubSubService
StreamRecords map[protocol.ID]map[pp.ID]*StreamRecord[T]
StreamMU sync.RWMutex
maxNodesConn int
// AllowInbound, when set, is called once at stream open before any heartbeat
// is decoded. remotePeer is the connecting peer; isNew is true when no
// StreamRecord exists yet (first-ever connection). Return a non-nil error
// to immediately reset the stream and refuse the peer.
AllowInbound func(remotePeer pp.ID, isNew bool) error
// ValidateHeartbeat, when set, is called inside the heartbeat loop after
// each successful CheckHeartbeat decode. Return a non-nil error to reset
// the stream and terminate the session.
ValidateHeartbeat func(remotePeer pp.ID) error
// AfterHeartbeat is called after each successful heartbeat with the full
// decoded Heartbeat so the hook can use the fresh embedded PeerRecord.
AfterHeartbeat func(hb *Heartbeat)
// AfterDelete is called after gc() evicts an expired peer, outside the lock.
// name and did may be empty if the HeartbeatStream had no metadata.
AfterDelete func(pid pp.ID, name string, did string)
// BuildHeartbeatResponse, when set, is called after each successfully decoded
// heartbeat to build the response sent back to the node.
// remotePeer is the peer that sent the heartbeat (used for offload routing).
// need is how many more indexers the node wants (from hb.Need).
// referent is true when the node designated this indexer as its search referent.
BuildHeartbeatResponse func(remotePeer pp.ID, need int, challenges []string, challengeDID string, referent bool) *HeartbeatResponse
}
func (ix *LongLivedStreamRecordedService[T]) MaxNodesConn() int {
return ix.maxNodesConn
}
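// Hypothetical wiring sketch (not part of this change): an indexer could use the
// AllowInbound hook together with the burst-guard settings from conf
// (MaxConnPerWindow / ConnWindowSecs) to refuse brand-new peers once the window
// is exhausted. newConnWindowLimiter and its Allow method are assumed helpers,
// not real API from this repository.
//
//	svc := NewStreamRecordedService[any](h, maxNodesConn)
//	limiter := newConnWindowLimiter(conf.GetConfig().MaxConnPerWindow,
//		time.Duration(conf.GetConfig().ConnWindowSecs)*time.Second)
//	svc.AllowInbound = func(remote pp.ID, isNew bool) error {
//		if isNew && !limiter.Allow() {
//			return errors.New("connection burst limit reached, try again later")
//		}
//		return nil
//	}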
func NewStreamRecordedService[T interface{}](h host.Host, maxNodesConn int) *LongLivedStreamRecordedService[T] {
service := &LongLivedStreamRecordedService[T]{
LongLivedPubSubService: NewLongLivedPubSubService(h),
StreamRecords: map[protocol.ID]map[pp.ID]*StreamRecord[T]{},
maxNodesConn: maxNodesConn,
}
go service.StartGC(30 * time.Second)
// Garbage collection is needed on every map of long-lived streams... this may warrant a top-level redesign.
go service.Snapshot(1 * time.Hour)
return service
}
func (ix *LongLivedStreamRecordedService[T]) StartGC(interval time.Duration) {
go func() {
t := time.NewTicker(interval)
defer t.Stop()
for range t.C {
fmt.Println("ACTUALLY RELATED INDEXERS", Indexers.Addrs, len(Indexers.Addrs))
ix.gc()
}
}()
}
func (ix *LongLivedStreamRecordedService[T]) gc() {
ix.StreamMU.Lock()
now := time.Now().UTC()
if ix.StreamRecords[ProtocolHeartbeat] == nil {
ix.StreamRecords[ProtocolHeartbeat] = map[pp.ID]*StreamRecord[T]{}
ix.StreamMU.Unlock()
return
}
streams := ix.StreamRecords[ProtocolHeartbeat]
type gcEntry struct {
pid pp.ID
name string
did string
}
var evicted []gcEntry
for pid, rec := range streams {
if now.After(rec.HeartbeatStream.Expiry) || now.Sub(rec.HeartbeatStream.UptimeTracker.LastSeen) > 2*rec.HeartbeatStream.Expiry.Sub(now) {
name, did := "", ""
if rec.HeartbeatStream != nil {
name = rec.HeartbeatStream.Name
did = rec.HeartbeatStream.DID
}
evicted = append(evicted, gcEntry{pid, name, did})
for _, sstreams := range ix.StreamRecords {
if sstreams[pid] != nil {
delete(sstreams, pid)
}
}
}
}
ix.StreamMU.Unlock()
if ix.AfterDelete != nil {
for _, e := range evicted {
ix.AfterDelete(e.pid, e.name, e.did)
}
}
}
func (ix *LongLivedStreamRecordedService[T]) Snapshot(interval time.Duration) {
go func() {
logger := oclib.GetLogger()
t := time.NewTicker(interval)
defer t.Stop()
for range t.C {
infos := ix.snapshot()
for _, inf := range infos {
logger.Info().Msg(" -> " + inf.DID)
}
}
}()
}
// -------- Snapshot / Query --------
func (ix *LongLivedStreamRecordedService[T]) snapshot() []*StreamRecord[T] {
ix.StreamMU.Lock()
defer ix.StreamMU.Unlock()
out := make([]*StreamRecord[T], 0, len(ix.StreamRecords))
for _, streams := range ix.StreamRecords {
for _, stream := range streams {
out = append(out, stream)
}
}
return out
}
func (ix *LongLivedStreamRecordedService[T]) HandleHeartbeat(s network.Stream) {
logger := oclib.GetLogger()
defer s.Close()
// AllowInbound: burst guard + ban check before the first byte is read.
if ix.AllowInbound != nil {
remotePeer := s.Conn().RemotePeer()
ix.StreamMU.RLock()
_, exists := ix.StreamRecords[ProtocolHeartbeat][remotePeer]
ix.StreamMU.RUnlock()
if err := ix.AllowInbound(remotePeer, !exists); err != nil {
logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("inbound connection refused")
s.Reset()
return
}
}
dec := json.NewDecoder(s)
for {
ix.StreamMU.Lock()
if ix.StreamRecords[ProtocolHeartbeat] == nil {
ix.StreamRecords[ProtocolHeartbeat] = map[pp.ID]*StreamRecord[T]{}
}
streams := ix.StreamRecords[ProtocolHeartbeat]
streamsAnonym := map[pp.ID]HeartBeatStreamed{}
for k, v := range streams {
streamsAnonym[k] = v
}
ix.StreamMU.Unlock()
pid, hb, err := CheckHeartbeat(ix.Host, s, dec, streamsAnonym, &ix.StreamMU, ix.maxNodesConn)
if err != nil {
// Stream-level errors (EOF, reset, closed) mean the connection is gone
// — exit so the goroutine doesn't spin forever on a dead stream.
// Metric/policy errors (score too low, too many connections) are transient
// — those are also stream-terminal since the stream carries one session.
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
strings.Contains(err.Error(), "reset") ||
strings.Contains(err.Error(), "closed") ||
strings.Contains(err.Error(), "too many connections") {
logger.Info().Err(err).Msg("heartbeat stream terminated, closing handler")
return
}
logger.Warn().Err(err).Msg("heartbeat check failed, retrying on same stream")
continue
}
// ValidateHeartbeat: per-tick behavioral check (rate limiting, bans).
if ix.ValidateHeartbeat != nil {
if err := ix.ValidateHeartbeat(*pid); err != nil {
logger.Warn().Err(err).Str("peer", pid.String()).Msg("heartbeat rejected, closing stream")
s.Reset()
return
}
}
ix.StreamMU.Lock()
// if record already seen update last seen
if rec, ok := streams[*pid]; ok {
rec.DID = hb.DID
// Preserve the existing UptimeTracker so TotalOnline accumulates correctly.
// hb.Stream is a fresh Stream with no UptimeTracker; carry the old one over.
oldTracker := rec.GetUptimeTracker()
rec.HeartbeatStream = hb.Stream
if oldTracker != nil {
rec.HeartbeatStream.UptimeTracker = oldTracker
} else {
rec.HeartbeatStream.UptimeTracker = &UptimeTracker{FirstSeen: time.Now().UTC()}
}
rec.HeartbeatStream.UptimeTracker.RecordHeartbeat()
rec.LastScore = hb.Score
logger.Info().Msg("A new node is updated : " + pid.String())
} else {
tracker := &UptimeTracker{FirstSeen: time.Now().UTC()}
tracker.RecordHeartbeat()
hb.Stream.UptimeTracker = tracker
streams[*pid] = &StreamRecord[T]{
DID: hb.DID,
HeartbeatStream: hb.Stream,
LastScore: hb.Score,
}
logger.Info().Msg("A new node is subscribed : " + pid.String())
}
ix.StreamMU.Unlock()
// Enrich hb.DID before calling the hook: nodes never set hb.DID directly;
// extract it from the embedded signed PeerRecord if available, then fall
// back to the DID stored by handleNodePublish in the stream record.
if hb.DID == "" && len(hb.Record) > 0 {
var partial struct {
DID string `json:"did"`
}
if json.Unmarshal(hb.Record, &partial) == nil && partial.DID != "" {
hb.DID = partial.DID
}
}
if hb.DID == "" {
ix.StreamMU.RLock()
if rec, ok := streams[*pid]; ok {
hb.DID = rec.DID
}
ix.StreamMU.RUnlock()
}
if ix.AfterHeartbeat != nil && hb.DID != "" {
ix.AfterHeartbeat(hb)
}
// Send response back to the node (bidirectional heartbeat).
if ix.BuildHeartbeatResponse != nil {
if resp := ix.BuildHeartbeatResponse(s.Conn().RemotePeer(), hb.Need, hb.Challenges, hb.ChallengeDID, hb.Referent); resp != nil {
s.SetWriteDeadline(time.Now().Add(3 * time.Second))
json.NewEncoder(s).Encode(resp)
s.SetWriteDeadline(time.Time{})
}
}
}
}
func CheckHeartbeat(h host.Host, s network.Stream, dec *json.Decoder, streams map[pp.ID]HeartBeatStreamed, lock *sync.RWMutex, maxNodes int) (*pp.ID, *Heartbeat, error) {
if len(h.Network().Peers()) >= maxNodes {
return nil, nil, fmt.Errorf("too many connections, try another indexer")
}
var hb Heartbeat
if err := dec.Decode(&hb); err != nil {
return nil, nil, err
}
_, bpms, latencyScore, _ := getBandwidthChallengeRate(h, s.Conn().RemotePeer(), MinPayloadChallenge+int(rand.Float64()*(MaxPayloadChallenge-MinPayloadChallenge)))
{
pid, err := pp.Decode(hb.PeerID)
if err != nil {
return nil, nil, err
}
uptimeRatio := float64(0)
age := time.Duration(0)
lock.Lock()
if rec, ok := streams[pid]; ok && rec.GetUptimeTracker() != nil {
uptimeRatio = rec.GetUptimeTracker().UptimeRatio()
age = rec.GetUptimeTracker().Uptime()
}
lock.Unlock()
// E: measure the indexer's own subnet diversity, not the node's view.
diversity := getOwnDiversityRate(h)
// fillRate: fraction of indexer capacity used — higher = more peers trust this indexer.
fillRate := 0.0
if maxNodes > 0 {
fillRate = float64(len(h.Network().Peers())) / float64(maxNodes)
if fillRate > 1 {
fillRate = 1
}
}
hb.ComputeIndexerScore(uptimeRatio, bpms, diversity, latencyScore, fillRate)
// B: dynamic minScore — starts at 20% for brand-new peers, ramps to 80% at 24h.
minScore := dynamicMinScore(age)
if hb.Score < minScore {
return nil, nil, errors.New("not enough trusting value")
}
hb.Stream = &Stream{
Name: hb.Name,
DID: hb.DID,
Stream: s,
Expiry: time.Now().UTC().Add(2 * time.Minute),
} // here is the long-lived bidirectional heartbeat.
return &pid, &hb, err
}
}
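// Illustrative sketch only: dynamicMinScore used above is defined elsewhere in
// this package. Assuming the ramp described in the comment (20 for a brand-new
// peer, 80 after 24h of tracked uptime) is linear, it would look roughly like:
func dynamicMinScoreSketch(age time.Duration) float64 {
	const (
		start = 20.0           // threshold for a brand-new peer
		end   = 80.0           // threshold after a full day of uptime
		ramp  = 24 * time.Hour // ramp duration
	)
	if age >= ramp {
		return end
	}
	return start + (end-start)*float64(age)/float64(ramp)
}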

View File

@@ -3,178 +3,51 @@ package common
import (
"context"
"encoding/json"
"errors"
"fmt"
"oc-discovery/conf"
"strings"
"sync"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
peer "cloud.o-forge.io/core/oc-lib/models/peer"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
type LongLivedStreamRecordedService[T interface{}] struct {
*LongLivedPubSubService
StreamRecords map[protocol.ID]map[pp.ID]*StreamRecord[T]
StreamMU sync.RWMutex
maxNodesConn int
isBidirectionnal bool
}
const (
ProtocolPublish = "/opencloud/record/publish/1.0"
ProtocolGet = "/opencloud/record/get/1.0"
)
func NewStreamRecordedService[T interface{}](h host.Host, maxNodesConn int, isBidirectionnal bool) *LongLivedStreamRecordedService[T] {
service := &LongLivedStreamRecordedService[T]{
LongLivedPubSubService: NewLongLivedPubSubService(h),
StreamRecords: map[protocol.ID]map[pp.ID]*StreamRecord[T]{},
maxNodesConn: maxNodesConn,
isBidirectionnal: isBidirectionnal,
}
go service.StartGC(30 * time.Second)
// Garbage collection is needed on every long-lived stream map... this may deserve a top-level redesign.
go service.Snapshot(1 * time.Hour)
return service
}
const ProtocolHeartbeat = "/opencloud/heartbeat/1.0"
func (ix *LongLivedStreamRecordedService[T]) StartGC(interval time.Duration) {
go func() {
t := time.NewTicker(interval)
defer t.Stop()
for range t.C {
ix.gc()
}
}()
}
// ProtocolWitnessQuery is opened by a node to ask a peer what it thinks of a given indexer.
const ProtocolWitnessQuery = "/opencloud/witness/1.0"
func (ix *LongLivedStreamRecordedService[T]) gc() {
ix.StreamMU.Lock()
defer ix.StreamMU.Unlock()
now := time.Now().UTC()
if ix.StreamRecords[ProtocolHeartbeat] == nil {
ix.StreamRecords[ProtocolHeartbeat] = map[pp.ID]*StreamRecord[T]{}
return
}
streams := ix.StreamRecords[ProtocolHeartbeat]
// ProtocolSearchPeer is opened by a node toward one of its indexers to start a
// distributed peer search. The stream stays open; the indexer writes
// SearchPeerResult JSON objects as results arrive from the GossipSub mesh.
const ProtocolSearchPeer = "/opencloud/search/peer/1.0"
for pid, rec := range streams {
if now.After(rec.HeartbeatStream.Expiry) || now.Sub(rec.LastSeen) > 2*rec.HeartbeatStream.Expiry.Sub(now) {
for _, sstreams := range ix.StreamRecords {
if sstreams[pid] != nil {
delete(sstreams, pid)
}
}
ix.PubsubMu.Lock()
if ix.LongLivedPubSubs[TopicPubSubNodeActivity] != nil {
ad, err := pp.AddrInfoFromString("/ip4/" + conf.GetConfig().Hostname + " /tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + " /p2p/" + ix.Host.ID().String())
if err == nil {
if b, err := json.Marshal(TopicNodeActivityPub{
Disposer: *ad,
Name: rec.HeartbeatStream.Name,
DID: rec.HeartbeatStream.DID,
PeerID: pid.String(),
NodeActivity: peer.OFFLINE,
}); err == nil {
ix.LongLivedPubSubs[TopicPubSubNodeActivity].Publish(context.Background(), b)
}
}
}
ix.PubsubMu.Unlock()
}
}
}
// ProtocolSearchPeerResponse is opened by an indexer back toward the emitting
// indexer to deliver search results found in its referencedNodes.
const ProtocolSearchPeerResponse = "/opencloud/search/peer/response/1.0"
func (ix *LongLivedStreamRecordedService[T]) Snapshot(interval time.Duration) {
go func() {
logger := oclib.GetLogger()
t := time.NewTicker(interval)
defer t.Stop()
for range t.C {
infos := ix.snapshot()
for _, inf := range infos {
logger.Info().Msg(" -> " + inf.DID)
}
}
}()
}
// -------- Snapshot / Query --------
func (ix *LongLivedStreamRecordedService[T]) snapshot() []*StreamRecord[T] {
ix.StreamMU.Lock()
defer ix.StreamMU.Unlock()
out := make([]*StreamRecord[T], 0, len(ix.StreamRecords))
for _, streams := range ix.StreamRecords {
for _, stream := range streams {
out = append(out, stream)
}
}
return out
}
func (ix *LongLivedStreamRecordedService[T]) HandleNodeHeartbeat(s network.Stream) {
defer s.Close()
for {
pid, hb, err := CheckHeartbeat(ix.Host, s, ix.maxNodesConn)
if err != nil {
continue
}
ix.StreamMU.Lock()
if ix.StreamRecords[ProtocolHeartbeat] == nil {
ix.StreamRecords[ProtocolHeartbeat] = map[pp.ID]*StreamRecord[T]{}
}
streams := ix.StreamRecords[ProtocolHeartbeat]
// if the record was already seen, update its last-seen timestamp
if rec, ok := streams[*pid]; ok {
rec.DID = hb.DID
rec.Stream = s
rec.HeartbeatStream = hb.Stream
rec.LastSeen = time.Now().UTC()
} else {
streams[*pid] = &StreamRecord[T]{
DID: hb.DID,
HeartbeatStream: hb.Stream,
Stream: s,
LastSeen: time.Now().UTC(),
}
}
ix.StreamMU.Unlock()
}
}
func CheckHeartbeat(h host.Host, s network.Stream, maxNodes int) (*pp.ID, *Heartbeat, error) {
if len(h.Network().Peers()) >= maxNodes {
return nil, nil, fmt.Errorf("too many connections, try another indexer")
}
var hb Heartbeat
if err := json.NewDecoder(s).Decode(&hb); err != nil {
return nil, nil, err
}
pid, err := pp.Decode(hb.PeerID)
hb.Stream = &Stream{
Name: hb.Name,
DID: hb.DID,
Stream: s,
Expiry: time.Now().UTC().Add(2 * time.Minute),
} // here is the long-lived bidirectional heartbeat.
return &pid, &hb, err
}
type StreamRecord[T interface{}] struct {
DID string
HeartbeatStream *Stream
Stream network.Stream
Record T
LastSeen time.Time // to check expiry
}
// ProtocolBandwidthProbe is a dedicated short-lived stream used exclusively
// for bandwidth/latency measurement. The handler echoes any bytes it receives.
// All nodes and indexers register this handler so peers can measure them.
const ProtocolBandwidthProbe = "/opencloud/probe/1.0"
type Stream struct {
Name string `json:"name"`
DID string `json:"did"`
Stream network.Stream
Expiry time.Time `json:"expiry"`
UptimeTracker *UptimeTracker
}
func (s *Stream) GetUptimeTracker() *UptimeTracker {
return s.UptimeTracker
}
func NewStream[T interface{}](s network.Stream, did string, record T) *Stream {
@@ -185,93 +58,120 @@ func NewStream[T interface{}](s network.Stream, did string, record T) *Stream {
}
}
type ProtocolStream map[protocol.ID]map[pp.ID]*Stream
func (ps ProtocolStream) Get(protocol protocol.ID) map[pp.ID]*Stream {
if ps[protocol] == nil {
ps[protocol] = map[pp.ID]*Stream{}
}
return ps[protocol]
type StreamRecord[T interface{}] struct {
DID string
HeartbeatStream *Stream
Record T
LastScore float64
}
func (ps ProtocolStream) Add(protocol protocol.ID, peerID *pp.ID, s *Stream) error {
if ps[protocol] == nil {
ps[protocol] = map[pp.ID]*Stream{}
}
if peerID != nil {
if s != nil {
ps[protocol][*peerID] = s
} else {
return errors.New("unable to add stream : stream missing")
}
}
func (s *StreamRecord[T]) GetUptimeTracker() *UptimeTracker {
if s.HeartbeatStream == nil {
return nil
}
return s.HeartbeatStream.UptimeTracker
}
func (ps ProtocolStream) Delete(protocol protocol.ID, peerID *pp.ID) {
if streams, ok := ps[protocol]; ok {
if peerID != nil && streams[*peerID] != nil {
streams[*peerID].Stream.Close()
delete(streams, *peerID)
} else {
for _, s := range ps {
for _, v := range s {
v.Stream.Close()
}
}
delete(ps, protocol)
}
}
type ProtocolInfo struct {
PersistantStream bool
WaitResponse bool
TTL time.Duration
}
const (
ProtocolPublish = "/opencloud/record/publish/1.0"
ProtocolGet = "/opencloud/record/get/1.0"
)
var StaticIndexers []*pp.AddrInfo = []*pp.AddrInfo{}
var StreamIndexers ProtocolStream = ProtocolStream{}
func ConnectToIndexers(h host.Host, minIndexer int, maxIndexer int, myPID pp.ID) {
logger := oclib.GetLogger()
ctx := context.Background()
addresses := strings.Split(conf.GetConfig().IndexerAddresses, ",")
if len(addresses) > maxIndexer {
addresses = addresses[0:maxIndexer]
}
for _, indexerAddr := range addresses {
ad, err := pp.AddrInfoFromString(indexerAddr)
if err != nil {
logger.Err(err)
continue
func TempStream(h host.Host, ad pp.AddrInfo, proto protocol.ID, did string, streams ProtocolStream, pts map[protocol.ID]*ProtocolInfo, mu *sync.RWMutex) (ProtocolStream, error) {
expiry := 2 * time.Second
if pts[proto] != nil {
expiry = pts[proto].TTL
}
ctxTTL, cancelTTL := context.WithTimeout(context.Background(), expiry)
defer cancelTTL()
if h.Network().Connectedness(ad.ID) != network.Connected {
if err := h.Connect(ctx, *ad); err != nil {
fmt.Println(err)
logger.Err(err)
continue
if err := h.Connect(ctxTTL, ad); err != nil {
return streams, err
}
}
StaticIndexers = append(StaticIndexers, ad)
// open privileged streams with the indexer.
for _, proto := range []protocol.ID{ProtocolPublish, ProtocolGet, ProtocolHeartbeat} {
AddStreamProtocol(nil, StreamIndexers, h, proto, ad.ID, myPID, true, nil)
}
}
if len(StaticIndexers) == 0 {
logger.Err(errors.New("you run a node without indexers... your gonna be isolated."))
}
if len(StaticIndexers) < minIndexer {
// TODO : ask for unknown indexer.
if streams[proto] != nil && streams[proto][ad.ID] != nil {
return streams, nil
} else if s, err := h.NewStream(ctxTTL, ad.ID, proto); err == nil {
mu.Lock()
if streams[proto] == nil {
streams[proto] = map[pp.ID]*Stream{}
}
SendHeartbeat(ctx, ProtocolHeartbeat, conf.GetConfig().Name, h, StreamIndexers, StaticIndexers, 20*time.Second) // your indexer is just like a node for the next indexer.
mu.Unlock()
time.AfterFunc(expiry, func() {
mu.Lock()
delete(streams[proto], ad.ID)
mu.Unlock()
})
mu.Lock()
streams[proto][ad.ID] = &Stream{
DID: did,
Stream: s,
Expiry: time.Now().UTC().Add(expiry),
}
mu.Unlock()
return streams, nil
} else {
return streams, err
}
}
func sendHeartbeat(ctx context.Context, h host.Host, proto protocol.ID, p *pp.AddrInfo,
hb Heartbeat, ps ProtocolStream, interval time.Duration) (*HeartbeatResponse, time.Duration, error) {
logger := oclib.GetLogger()
if ps[proto] == nil {
ps[proto] = map[pp.ID]*Stream{}
}
streams := ps[proto]
pss, exists := streams[p.ID]
ctxTTL, cancel := context.WithTimeout(ctx, 3*interval)
defer cancel()
if h.Network().Connectedness(p.ID) != network.Connected {
if err := h.Connect(ctxTTL, *p); err != nil {
logger.Err(err)
return nil, 0, err
}
exists = false
}
if !exists || pss.Stream == nil {
logger.Info().Msg("New Stream engaged as Heartbeat " + fmt.Sprintf("%v", proto) + " " + p.ID.String())
s, err := h.NewStream(ctx, p.ID, proto)
if err != nil {
fmt.Println(s, err, p.ID)
logger.Err(err)
return nil, 0, err
}
pss = &Stream{
Stream: s,
Expiry: time.Now().UTC().Add(2 * time.Minute),
}
streams[p.ID] = pss
}
sentAt := time.Now()
if err := json.NewEncoder(pss.Stream).Encode(&hb); err != nil {
pss.Stream.Close()
pss.Stream = nil
return nil, 0, err
}
pss.Expiry = time.Now().UTC().Add(2 * time.Minute)
// Try to read a response (indexers that support bidirectional heartbeat respond).
pss.Stream.SetReadDeadline(time.Now().Add(5 * time.Second))
var resp HeartbeatResponse
rtt := time.Since(sentAt)
if err := json.NewDecoder(pss.Stream).Decode(&resp); err == nil {
rtt = time.Since(sentAt)
pss.Stream.SetReadDeadline(time.Time{})
return &resp, rtt, nil
}
pss.Stream.SetReadDeadline(time.Time{})
return nil, rtt, nil
}
func AddStreamProtocol(ctx *context.Context, protoS ProtocolStream, h host.Host, proto protocol.ID, id pp.ID, mypid pp.ID, force bool, onStreamCreated *func(network.Stream)) ProtocolStream {
logger := oclib.GetLogger()
if onStreamCreated == nil {
f := func(s network.Stream) {
protoS[proto][id] = &Stream{
@@ -294,7 +194,7 @@ func AddStreamProtocol(ctx *context.Context, protoS ProtocolStream, h host.Host,
if protoS[proto][id] != nil {
protoS[proto][id].Expiry = time.Now().Add(2 * time.Minute)
} else {
fmt.Println("GENERATE STREAM", proto, id)
logger.Info().Msg("NEW STREAM Generated" + fmt.Sprintf("%v", proto) + " " + id.String())
s, err := h.NewStream(*ctx, id, proto)
if err != nil {
panic(err.Error())
@@ -304,99 +204,3 @@ func AddStreamProtocol(ctx *context.Context, protoS ProtocolStream, h host.Host,
}
return protoS
}
type Heartbeat struct {
Name string `json:"name"`
Stream *Stream `json:"stream"`
DID string `json:"did"`
PeerID string `json:"peer_id"`
Timestamp int64 `json:"timestamp"`
}
type HeartbeatInfo []struct {
Info []byte `json:"info"`
}
const ProtocolHeartbeat = "/opencloud/heartbeat/1.0"
func SendHeartbeat(ctx context.Context, proto protocol.ID, name string, h host.Host, ps ProtocolStream, peers []*pp.AddrInfo, interval time.Duration) {
peerID, err := oclib.GenerateNodeID()
if err != nil {
panic("heartbeat daemon failed to start: cannot generate node ID")
}
go func() {
t := time.NewTicker(interval)
defer t.Stop()
for {
select {
case <-t.C:
hb := Heartbeat{
Name: name,
DID: peerID,
PeerID: h.ID().String(),
Timestamp: time.Now().UTC().Unix(),
}
for _, ix := range peers {
_ = sendHeartbeat(ctx, h, proto, ix, hb, ps, interval)
}
case <-ctx.Done():
return
}
}
}()
}
func sendHeartbeat(ctx context.Context, h host.Host, proto protocol.ID, p *pp.AddrInfo,
hb Heartbeat, ps ProtocolStream, interval time.Duration) error {
streams := ps.Get(proto)
if len(streams) == 0 {
return errors.New("no stream for protocol heartbeat founded")
}
pss, exists := streams[p.ID]
ctxTTL, _ := context.WithTimeout(ctx, 3*interval)
// Connect if necessary
if h.Network().Connectedness(p.ID) != network.Connected {
_ = h.Connect(ctxTTL, *p)
exists = false // the stream will need to be recreated
}
// Create the stream if it does not exist or is closed
if !exists || pss.Stream == nil {
s, err := h.NewStream(ctx, p.ID, proto)
if err != nil {
return err
}
pss = &Stream{
Stream: s,
Expiry: time.Now().UTC().Add(2 * time.Minute),
}
streams[p.ID] = pss
}
// Send the heartbeat
ss := json.NewEncoder(pss.Stream)
err := ss.Encode(&hb)
if err != nil {
pss.Stream.Close()
pss.Stream = nil // will be recreated on the next tick
return err
}
pss.Expiry = time.Now().UTC().Add(2 * time.Minute)
return nil
}
/*
func SearchPeer(search string) ([]*peer.Peer, error) {
ps := []*peer.Peer{}
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
peers := access.Search(nil, search, false)
if len(peers.Data) == 0 {
return ps, errors.New("no self available")
}
for _, p := range peers.Data {
ps = append(ps, p.(*peer.Peer))
}
return ps, nil
}
*/

View File

@@ -0,0 +1,199 @@
package common
import (
"context"
"encoding/json"
"sort"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
)
// ProtocolIndexerCandidates is opened by a node toward its remaining indexers
// to request candidate replacement indexers after an ejection event.
const ProtocolIndexerCandidates = "/opencloud/indexer/candidates/1.0"
// IndexerCandidatesRequest is sent by a node to one of its indexers.
// Count is how many candidates are needed.
type IndexerCandidatesRequest struct {
Count int `json:"count"`
}
// IndexerCandidatesResponse carries a random sample of known indexers from
// the responding indexer's DHT cache.
type IndexerCandidatesResponse struct {
Candidates []pp.AddrInfo `json:"candidates"`
}
// TriggerConsensus asks each remaining indexer for a random pool of candidates,
// scores them asynchronously via a one-shot probe heartbeat, and admits the
// best ones to StaticIndexers. Falls back to DHT replenishment for any gap.
//
// Must be called in a goroutine — it blocks until all probes have returned
// (or timed out), which can take up to ~10s.
func TriggerConsensus(h host.Host, remaining []pp.AddrInfo, need int) {
if need <= 0 || len(remaining) == 0 {
return
}
logger := oclib.GetLogger()
logger.Info().Int("voters", len(remaining)).Int("need", need).
Msg("[consensus] starting indexer candidate consensus")
// Phase 1 — collect candidates from all remaining indexers in parallel.
type collectResult struct{ candidates []pp.AddrInfo }
collectCh := make(chan collectResult, len(remaining))
for _, ai := range remaining {
go func(ai pp.AddrInfo) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
s, err := h.NewStream(ctx, ai.ID, ProtocolIndexerCandidates)
if err != nil {
collectCh <- collectResult{}
return
}
defer s.Close()
s.SetDeadline(time.Now().Add(5 * time.Second))
if err := json.NewEncoder(s).Encode(IndexerCandidatesRequest{Count: need + 2}); err != nil {
collectCh <- collectResult{}
return
}
var resp IndexerCandidatesResponse
if err := json.NewDecoder(s).Decode(&resp); err != nil {
collectCh <- collectResult{}
return
}
collectCh <- collectResult{candidates: resp.Candidates}
}(ai)
}
// Merge and deduplicate, excluding indexers already in the pool.
seen := map[pp.ID]struct{}{}
for _, ai := range Indexers.GetAddrIDs() {
seen[ai] = struct{}{}
}
var candidates []pp.AddrInfo
for range remaining {
r := <-collectCh
for _, ai := range r.candidates {
if _, dup := seen[ai.ID]; !dup {
seen[ai.ID] = struct{}{}
candidates = append(candidates, ai)
}
}
}
if len(candidates) == 0 {
logger.Info().Msg("[consensus] no candidates from voters, falling back to DHT")
replenishIndexersFromDHT(h, need)
return
}
logger.Info().Int("candidates", len(candidates)).Msg("[consensus] scoring candidates")
// Phase 2 — score all candidates in parallel via a one-shot probe heartbeat.
type scoreResult struct {
ai pp.AddrInfo
score float64
}
scoreCh := make(chan scoreResult, len(candidates))
for _, ai := range candidates {
go func(ai pp.AddrInfo) {
resp, rtt, err := probeIndexer(h, ai)
if err != nil {
scoreCh <- scoreResult{ai: ai, score: 0}
return
}
scoreCh <- scoreResult{ai: ai, score: quickScore(resp, rtt)}
}(ai)
}
results := make([]scoreResult, 0, len(candidates))
for range candidates {
results = append(results, <-scoreCh)
}
// Sort descending by quick score, admit top `need` above the minimum bar.
sort.Slice(results, func(i, j int) bool { return results[i].score > results[j].score })
minQ := dynamicMinScore(0) // fresh peer: threshold starts at 20
admitted := 0
for _, res := range results {
if admitted >= need {
break
}
if res.score < minQ {
break // sorted desc: everything after is worse
}
key := addrKey(res.ai)
if Indexers.ExistsAddr(key) {
continue // already in pool (race with heartbeat path)
}
cpy := res.ai
Indexers.SetAddr(key, &cpy)
admitted++
}
if admitted > 0 {
logger.Info().Int("admitted", admitted).Msg("[consensus] candidates admitted to pool")
Indexers.NudgeIt()
}
// Fill any remaining gap with DHT discovery.
if gap := need - admitted; gap > 0 {
logger.Info().Int("gap", gap).Msg("[consensus] gap after consensus, falling back to DHT")
replenishIndexersFromDHT(h, gap)
}
}
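// Hypothetical call-site sketch: how a node might react once an indexer has been
// ejected from its pool. The function name and the "need" value are assumptions;
// only TriggerConsensus and Indexers.GetAddrs are real.
func onIndexerEjectedSketch(h host.Host, need int) {
	remaining := []pp.AddrInfo{}
	for _, e := range Indexers.GetAddrs() {
		remaining = append(remaining, *e.Info)
	}
	// TriggerConsensus blocks on its probes (up to ~10s), so keep it off the hot path.
	go TriggerConsensus(h, remaining, need)
}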
// probeIndexer dials the candidate, sends one lightweight heartbeat, and
// returns the HeartbeatResponse (nil if the indexer doesn't support it) and RTT.
func probeIndexer(h host.Host, ai pp.AddrInfo) (*HeartbeatResponse, time.Duration, error) {
ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second)
defer cancel()
if h.Network().Connectedness(ai.ID) != network.Connected {
if err := h.Connect(ctx, ai); err != nil {
return nil, 0, err
}
}
s, err := h.NewStream(ctx, ai.ID, ProtocolHeartbeat)
if err != nil {
return nil, 0, err
}
defer s.Close()
hb := Heartbeat{PeerID: h.ID().String(), Timestamp: time.Now().UTC().Unix()}
s.SetWriteDeadline(time.Now().Add(3 * time.Second))
if err := json.NewEncoder(s).Encode(hb); err != nil {
return nil, 0, err
}
s.SetWriteDeadline(time.Time{})
sentAt := time.Now()
s.SetReadDeadline(time.Now().Add(5 * time.Second))
var resp HeartbeatResponse
if err := json.NewDecoder(s).Decode(&resp); err != nil {
// Indexer connected but no response: connection itself is the signal.
return nil, time.Since(sentAt), nil
}
return &resp, time.Since(sentAt), nil
}
// quickScore computes a lightweight score [0,100] from a probe result.
// Uses only fill rate (inverse) and latency — the two signals available
// without a full heartbeat history.
func quickScore(resp *HeartbeatResponse, rtt time.Duration) float64 {
maxRTT := BaseRoundTrip * 10
latencyScore := 1.0 - float64(rtt)/float64(maxRTT)
if latencyScore < 0 {
latencyScore = 0
}
if resp == nil {
// Connection worked but no response (old indexer): moderate score.
return latencyScore * 50
}
fillScore := 1.0 - resp.FillRate // prefer less-loaded indexers
return (0.5*latencyScore + 0.5*fillScore) * 100
}
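// Worked example (illustrative; the concrete BaseRoundTrip constant is defined
// elsewhere in this package). Assuming BaseRoundTrip = 100ms, so maxRTT = 1s:
//   rtt = 200ms, resp == nil          → latencyScore = 0.8, quickScore = 0.8*50 = 40
//   rtt = 200ms, resp.FillRate = 0.3  → fillScore = 0.7, quickScore = (0.5*0.8 + 0.5*0.7)*100 = 75
//   rtt ≥ 1s                          → latencyScore clamps to 0, quickScore = 0 (or 50*fillScore)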

View File

@@ -2,12 +2,8 @@ package common
import (
"bytes"
"crypto/ed25519"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"errors"
"fmt"
"oc-discovery/conf"
"oc-discovery/models"
"os"
@@ -47,45 +43,6 @@ func Verify(pub crypto.PubKey, data, sig []byte) (bool, error) {
return pub.Verify(data, sig)
}
func LoadKeyFromFilePrivate() (crypto.PrivKey, error) {
path := conf.GetConfig().PrivateKeyPath
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
block, _ := pem.Decode(data)
keyAny, err := x509.ParsePKCS8PrivateKey(block.Bytes)
if err != nil {
return nil, err
}
edKey, ok := keyAny.(ed25519.PrivateKey)
if !ok {
return nil, fmt.Errorf("not an ed25519 key")
}
return crypto.UnmarshalEd25519PrivateKey(edKey)
}
func LoadKeyFromFilePublic() (crypto.PubKey, error) {
path := conf.GetConfig().PublicKeyPath
data, err := os.ReadFile(path)
if err != nil {
return nil, err
}
block, _ := pem.Decode(data)
keyAny, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return nil, err
}
edKey, ok := keyAny.(ed25519.PublicKey)
if !ok {
return nil, fmt.Errorf("not an ed25519 key")
}
// Try to unmarshal as libp2p private key (supports ed25519, rsa, etc.)
return crypto.UnmarshalEd25519PublicKey(edKey)
}
func LoadPSKFromFile() (pnet.PSK, error) {
path := conf.GetConfig().PSKPath
data, err := os.ReadFile(path)

View File

@@ -0,0 +1,219 @@
package common
import (
"context"
"math/rand"
"strings"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"github.com/ipfs/go-cid"
dht "github.com/libp2p/go-libp2p-kad-dht"
"github.com/libp2p/go-libp2p/core/host"
pp "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
mh "github.com/multiformats/go-multihash"
)
// FilterLoopbackAddrs strips loopback (127.x, ::1) and unspecified addresses
// from an AddrInfo so we never hand peers an address they cannot dial externally.
func FilterLoopbackAddrs(ai pp.AddrInfo) pp.AddrInfo {
filtered := make([]ma.Multiaddr, 0, len(ai.Addrs))
for _, addr := range ai.Addrs {
ip, err := ExtractIP(addr.String())
if err != nil || ip.IsLoopback() || ip.IsUnspecified() {
continue
}
filtered = append(filtered, addr)
}
return pp.AddrInfo{ID: ai.ID, Addrs: filtered}
}
// RecommendedHeartbeatInterval is the target period between heartbeat ticks.
// Indexers use this as the DHT Provide refresh interval.
const RecommendedHeartbeatInterval = 60 * time.Second
// discoveryDHT is the DHT instance used for indexer discovery.
// Set by SetDiscoveryDHT once the indexer service initialises its DHT.
var discoveryDHT *dht.IpfsDHT
// SetDiscoveryDHT stores the DHT instance used by replenishIndexersFromDHT.
// Called by NewIndexerService once the DHT is ready.
func SetDiscoveryDHT(d *dht.IpfsDHT) {
discoveryDHT = d
}
// initNodeDHT creates a lightweight DHT client for pure nodes (no IndexerService).
// Uses the seed indexers as bootstrap peers. Called lazily by ConnectToIndexers
// when discoveryDHT is still nil after the initial warm-up delay.
func initNodeDHT(h host.Host, seeds []Entry) {
logger := oclib.GetLogger()
bootstrapPeers := []pp.AddrInfo{}
for _, s := range seeds {
bootstrapPeers = append(bootstrapPeers, *s.Info)
}
d, err := dht.New(context.Background(), h,
dht.Mode(dht.ModeClient),
dht.ProtocolPrefix("oc"),
dht.BootstrapPeers(bootstrapPeers...),
)
if err != nil {
logger.Warn().Err(err).Msg("[dht] node DHT client init failed")
return
}
SetDiscoveryDHT(d)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
defer cancel()
if err := d.Bootstrap(ctx); err != nil {
logger.Warn().Err(err).Msg("[dht] node DHT client bootstrap failed")
}
logger.Info().Msg("[dht] node DHT client ready")
}
// IndexerCID returns the well-known CID under which all indexers advertise.
func IndexerCID() cid.Cid {
h, _ := mh.Sum([]byte("/opencloud/indexers"), mh.SHA2_256, -1)
return cid.NewCidV1(cid.Raw, h)
}
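// Sketch of the advertising side (assumption: the real Provide loop lives in the
// indexer service, not in this file). An indexer announces itself under the
// well-known CID and refreshes the provider record on the heartbeat interval.
func advertiseIndexerSketch(ctx context.Context, d *dht.IpfsDHT) {
	t := time.NewTicker(RecommendedHeartbeatInterval)
	defer t.Stop()
	for {
		if err := d.Provide(ctx, IndexerCID(), true); err != nil {
			oclib.GetLogger().Warn().Err(err).Msg("[dht] indexer advertise failed")
		}
		select {
		case <-t.C:
		case <-ctx.Done():
			return
		}
	}
}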
// DiscoverIndexersFromDHT uses the DHT to find up to count indexers advertising
// under the well-known key. Excludes self. Resolves addresses when the provider
// record carries none.
func DiscoverIndexersFromDHT(h host.Host, d *dht.IpfsDHT, count int) []pp.AddrInfo {
logger := oclib.GetLogger()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
c := IndexerCID()
ch := d.FindProvidersAsync(ctx, c, count*2)
seen := map[pp.ID]struct{}{}
var results []pp.AddrInfo
for ai := range ch {
if ai.ID == h.ID() {
continue
}
if _, dup := seen[ai.ID]; dup {
continue
}
seen[ai.ID] = struct{}{}
if len(ai.Addrs) == 0 {
resolved, err := d.FindPeer(ctx, ai.ID)
if err != nil {
logger.Warn().Str("peer", ai.ID.String()).Msg("[dht] no addrs and FindPeer failed, skipping")
continue
}
ai = resolved
}
ai = FilterLoopbackAddrs(ai)
if len(ai.Addrs) == 0 {
continue
}
results = append(results, ai)
if len(results) >= count {
break
}
}
logger.Info().Int("found", len(results)).Msg("[dht] indexer discovery complete")
return results
}
// SelectByFillRate picks up to want providers using fill-rate weighted
// selection w(F) = F*(1-F), which peaks at F=0.5: moderately loaded indexers
// are preferred over empty or saturated ones (ties broken by shuffling).
// Providers with unknown fill rate receive F=0.5 (neutral prior).
// Enforces subnet /24 diversity: at most one indexer per /24.
func SelectByFillRate(providers []pp.AddrInfo, fillRates map[pp.ID]float64, want int) []pp.AddrInfo {
if len(providers) == 0 || want <= 0 {
return nil
}
type weighted struct {
ai pp.AddrInfo
weight float64
}
ws := make([]weighted, 0, len(providers))
for _, ai := range providers {
f, ok := fillRates[ai.ID]
if !ok {
f = 0.5
}
ws = append(ws, weighted{ai: ai, weight: f * (1 - f)})
}
// Shuffle first for fairness among equal-weight peers.
rand.Shuffle(len(ws), func(i, j int) { ws[i], ws[j] = ws[j], ws[i] })
// Sort descending by weight (simple insertion sort — small N).
for i := 1; i < len(ws); i++ {
for j := i; j > 0 && ws[j].weight > ws[j-1].weight; j-- {
ws[j], ws[j-1] = ws[j-1], ws[j]
}
}
subnets := map[string]struct{}{}
var selected []pp.AddrInfo
for _, w := range ws {
if len(selected) >= want {
break
}
subnet := subnetOf(w.ai)
if subnet != "" {
if _, dup := subnets[subnet]; dup {
continue
}
subnets[subnet] = struct{}{}
}
selected = append(selected, w.ai)
}
return selected
}
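// Worked example for w(F) = F*(1-F):
//   F = 0.0 (empty)       → w = 0.00
//   F = 0.3               → w = 0.21
//   F = 0.5 (half full)   → w = 0.25 (maximum)
//   F = 0.9 (nearly full) → w = 0.09
//   unknown fill rate     → neutral prior F = 0.5, w = 0.25
// A moderately loaded indexer therefore wins over an empty or saturated one, and
// the /24 rule then drops any later candidate sharing a subnet with an earlier pick.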
// subnetOf returns the /24 subnet string for the first non-loopback address of ai.
func subnetOf(ai pp.AddrInfo) string {
for _, addr := range ai.Addrs {
ip, err := ExtractIP(addr.String())
if err != nil || ip.IsLoopback() {
continue
}
parts := strings.Split(ip.String(), ".")
if len(parts) >= 3 {
return parts[0] + "." + parts[1] + "." + parts[2]
}
}
return ""
}
// replenishIndexersFromDHT is called when an indexer heartbeat fails and more
// indexers are needed. Queries the DHT and adds fresh entries to StaticIndexers.
func replenishIndexersFromDHT(h host.Host, need int) {
if need <= 0 || discoveryDHT == nil {
return
}
logger := oclib.GetLogger()
logger.Info().Int("need", need).Msg("[dht] replenishing indexer pool from DHT")
providers := DiscoverIndexersFromDHT(h, discoveryDHT, need*3)
selected := SelectByFillRate(providers, nil, need)
if len(selected) == 0 {
logger.Warn().Msg("[dht] no indexers found in DHT for replenishment")
return
}
added := 0
for _, ai := range selected {
addr := addrKey(ai)
if !Indexers.ExistsAddr(addr) {
adCopy := ai
Indexers.SetAddr(addr, &adCopy)
added++
}
}
if added > 0 {
logger.Info().Int("added", added).Msg("[dht] indexers added from DHT")
Indexers.NudgeIt()
}
}
// addrKey returns the canonical map key for an AddrInfo.
// The PeerID is used as key so the same peer is never stored twice regardless
// of which of its addresses was seen first.
func addrKey(ai pp.AddrInfo) string {
return ai.ID.String()
}

View File

@@ -4,8 +4,14 @@ import (
"context"
"cloud.o-forge.io/core/oc-lib/models/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
)
type HeartBeatStreamed interface {
GetUptimeTracker() *UptimeTracker
}
type DiscoveryPeer interface {
GetPeerRecord(ctx context.Context, key string) ([]*peer.Peer, error)
GetPubSub(topicName string) *pubsub.Topic
}

View File

@@ -0,0 +1,68 @@
package common
import (
"context"
"fmt"
"math/rand"
"net"
"time"
"github.com/libp2p/go-libp2p/core/host"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
)
func PeerIsAlive(h host.Host, ad pp.AddrInfo) bool {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
err := h.Connect(ctx, ad)
return err == nil
}
func ExtractIP(addr string) (net.IP, error) {
ma, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return nil, err
}
ipStr, err := ma.ValueForProtocol(multiaddr.P_IP4)
if err != nil {
ipStr, err = ma.ValueForProtocol(multiaddr.P_IP6)
if err != nil {
return nil, err
}
}
ip := net.ParseIP(ipStr)
if ip == nil {
return nil, fmt.Errorf("invalid IP: %s", ipStr)
}
return ip, nil
}
func GetIndexer(addrOrId string) *pp.AddrInfo {
return Indexers.GetAddr(addrOrId)
}
func GetIndexersIDs() []pp.ID {
return Indexers.GetAddrIDs()
}
func GetIndexersStr() []string {
return Indexers.GetAddrsStr()
}
func GetIndexers() []*pp.AddrInfo {
entries := Indexers.GetAddrs()
result := make([]*pp.AddrInfo, 0, len(entries))
for _, e := range entries {
result = append(result, e.Info)
}
return result
}
func Shuffle[T any](slice []T) []T {
rand.Shuffle(len(slice), func(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
})
return slice
}

View File

@@ -0,0 +1,137 @@
package node
import (
"context"
"encoding/json"
"time"
"oc-discovery/daemons/node/common"
"oc-discovery/daemons/node/indexer"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/peer"
"github.com/libp2p/go-libp2p/core/control"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
ma "github.com/multiformats/go-multiaddr"
)
// OCConnectionGater enforces two rules on every inbound connection:
// 1. If the peer is known locally and blacklisted → reject.
// 2. If the peer is unknown locally → ask indexers one by one whether it
// exists in the DHT. Accept as soon as one confirms it; reject if none do
// (or if no indexers are reachable yet, allow optimistically).
//
// Outbound connections are always allowed — we chose to dial them.
type OCConnectionGater struct {
host host.Host
}
func newOCConnectionGater(h host.Host) *OCConnectionGater {
return &OCConnectionGater{host: h}
}
// InterceptPeerDial — allow all outbound dials.
func (g *OCConnectionGater) InterceptPeerDial(_ pp.ID) bool { return true }
// InterceptAddrDial — allow all outbound dials.
func (g *OCConnectionGater) InterceptAddrDial(_ pp.ID, _ ma.Multiaddr) bool { return true }
// InterceptAccept — allow at transport level (PeerID not yet known).
func (g *OCConnectionGater) InterceptAccept(_ network.ConnMultiaddrs) bool { return true }
// InterceptUpgraded — final gate; always allow (decisions already made in InterceptSecured).
func (g *OCConnectionGater) InterceptUpgraded(_ network.Conn) (bool, control.DisconnectReason) {
return true, 0
}
// InterceptSecured is called after the cryptographic handshake — PeerID is now known.
// Only inbound connections are verified; outbound are trusted.
func (g *OCConnectionGater) InterceptSecured(dir network.Direction, pid pp.ID, _ network.ConnMultiaddrs) bool {
if dir == network.DirOutbound {
return true
}
logger := oclib.GetLogger()
// 1. Local DB lookup by PeerID.
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
results := access.Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // filter on the libp2p peer_id
"peer_id": {{Operator: dbs.EQUAL.String(), Value: pid.String()}},
},
}, pid.String(), false)
for _, item := range results.Data {
p, ok := item.(*peer.Peer)
if !ok || p.PeerID != pid.String() {
continue
}
if p.Relation == peer.BLACKLIST {
logger.Warn().Str("peer", pid.String()).Msg("[gater] rejected blacklisted peer")
return false
}
// Known, not blacklisted.
return true
}
// 2. Unknown locally — verify via indexers.
indexers := common.Indexers.GetAddrs()
if len(indexers) == 0 {
// No indexers reachable yet — allow optimistically (bootstrap phase).
logger.Warn().Str("peer", pid.String()).Msg("[gater] no indexers available, allowing unverified inbound")
return true
}
req := indexer.GetValue{PeerID: pid.String()}
// A single DHT GetValue already traverses the entire DHT network, so asking
// a second indexer would yield the same result. We only fall through to the
// next indexer if the current one is unreachable (transport error), not if
// it returns found=false (that answer is already DHT-wide authoritative).
for _, ai := range indexers {
found, reachable := queryIndexerPeerExists(g.host, *ai.Info, req)
if !reachable {
continue // indexer down — try next
}
if !found {
logger.Warn().Str("peer", pid.String()).Msg("[gater] peer not found in DHT, rejecting inbound")
}
return found // definitive DHT answer
}
// All indexers unreachable — allow optimistically rather than blocking indefinitely.
logger.Warn().Str("peer", pid.String()).Msg("[gater] all indexers unreachable, allowing unverified inbound")
return true
}
// queryIndexerPeerExists opens a fresh one-shot stream to ai, sends a GetValue
// request, and returns (found, reachable).
// reachable=false means the indexer could not be reached (transport error);
// the caller should then try another indexer.
// reachable=true means the indexer answered — found is the DHT-wide authoritative result.
func queryIndexerPeerExists(h host.Host, ai pp.AddrInfo, req indexer.GetValue) (found, reachable bool) {
ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
defer cancel()
if h.Network().Connectedness(ai.ID) != network.Connected {
if err := h.Connect(ctx, ai); err != nil {
return false, false
}
}
s, err := h.NewStream(ctx, ai.ID, common.ProtocolGet)
if err != nil {
return false, false
}
defer s.Close()
s.SetDeadline(time.Now().Add(3 * time.Second))
if err := json.NewEncoder(s).Encode(req); err != nil {
return false, false
}
var resp indexer.GetResponse
if err := json.NewDecoder(s).Decode(&resp); err != nil {
return false, false
}
return resp.Found, true
}

View File

@@ -0,0 +1,254 @@
package indexer
import (
"errors"
"sync"
"time"
"oc-discovery/conf"
pp "github.com/libp2p/go-libp2p/core/peer"
)
// ── defaults ──────────────────────────────────────────────────────────────────
const (
defaultMaxConnPerWindow = 20
defaultConnWindowSecs = 30
defaultMaxHBPerMinute = 5
defaultMaxPublishPerMin = 10
defaultMaxGetPerMin = 50
strikeThreshold = 3
banDuration = 10 * time.Minute
behaviorWindowDur = 60 * time.Second
)
func cfgOr(v, def int) int {
if v > 0 {
return v
}
return def
}
// ── ConnectionRateGuard ───────────────────────────────────────────────────────
// ConnectionRateGuard limits the number of NEW incoming connections accepted
// within a sliding time window. It protects public indexers against coordinated
// registration floods (Sybil bursts).
type ConnectionRateGuard struct {
mu sync.Mutex
window []time.Time
maxInWindow int
windowDur time.Duration
}
func newConnectionRateGuard() *ConnectionRateGuard {
cfg := conf.GetConfig()
return &ConnectionRateGuard{
maxInWindow: cfgOr(cfg.MaxConnPerWindow, defaultMaxConnPerWindow),
windowDur: time.Duration(cfgOr(cfg.ConnWindowSecs, defaultConnWindowSecs)) * time.Second,
}
}
// Allow returns true if a new connection may be accepted.
// The internal window is pruned on each call so memory stays bounded.
func (g *ConnectionRateGuard) Allow() bool {
g.mu.Lock()
defer g.mu.Unlock()
now := time.Now()
cutoff := now.Add(-g.windowDur)
i := 0
for i < len(g.window) && g.window[i].Before(cutoff) {
i++
}
g.window = g.window[i:]
if len(g.window) >= g.maxInWindow {
return false
}
g.window = append(g.window, now)
return true
}
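// Example with the defaults (MaxConnPerWindow = 20, ConnWindowSecs = 30): the
// first 20 new inbound connections inside any rolling 30-second window pass,
// the 21st call to Allow returns false, and capacity frees up again as the
// oldest timestamps age out of the window. Because entries are only appended
// while the count is below the limit, the window slice stays bounded at maxInWindow.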
// ── per-node state ────────────────────────────────────────────────────────────
type nodeBehavior struct {
mu sync.Mutex
knownDID string
hbTimes []time.Time
pubTimes []time.Time
getTimes []time.Time
strikes int
bannedUntil time.Time
}
func (nb *nodeBehavior) isBanned() bool {
return time.Now().Before(nb.bannedUntil)
}
func (nb *nodeBehavior) strike(n int) {
nb.strikes += n
if nb.strikes >= strikeThreshold {
nb.bannedUntil = time.Now().Add(banDuration)
}
}
func pruneWindow(ts []time.Time, dur time.Duration) []time.Time {
cutoff := time.Now().Add(-dur)
i := 0
for i < len(ts) && ts[i].Before(cutoff) {
i++
}
return ts[i:]
}
// recordInWindow appends now to the window slice and returns false (+ adds a
// strike) when the count exceeds max.
func (nb *nodeBehavior) recordInWindow(ts *[]time.Time, max int) bool {
*ts = pruneWindow(*ts, behaviorWindowDur)
if len(*ts) >= max {
nb.strike(1)
return false
}
*ts = append(*ts, time.Now())
return true
}
// ── NodeBehaviorTracker ───────────────────────────────────────────────────────
// NodeBehaviorTracker is the indexer-side per-node compliance monitor.
// It is entirely local: no state is shared with other indexers.
type NodeBehaviorTracker struct {
mu sync.RWMutex
nodes map[pp.ID]*nodeBehavior
maxHB int
maxPub int
maxGet int
}
func newNodeBehaviorTracker() *NodeBehaviorTracker {
cfg := conf.GetConfig()
return &NodeBehaviorTracker{
nodes: make(map[pp.ID]*nodeBehavior),
maxHB: cfgOr(cfg.MaxHBPerMinute, defaultMaxHBPerMinute),
maxPub: cfgOr(cfg.MaxPublishPerMinute, defaultMaxPublishPerMin),
maxGet: cfgOr(cfg.MaxGetPerMinute, defaultMaxGetPerMin),
}
}
func (t *NodeBehaviorTracker) get(pid pp.ID) *nodeBehavior {
t.mu.RLock()
nb := t.nodes[pid]
t.mu.RUnlock()
if nb != nil {
return nb
}
t.mu.Lock()
defer t.mu.Unlock()
if nb = t.nodes[pid]; nb == nil {
nb = &nodeBehavior{}
t.nodes[pid] = nb
}
return nb
}
// IsBanned returns true when the peer is in an active ban period.
func (t *NodeBehaviorTracker) IsBanned(pid pp.ID) bool {
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
return nb.isBanned()
}
// RecordHeartbeat checks heartbeat cadence. Returns an error if the peer is
// flooding (too many heartbeats in the sliding window).
func (t *NodeBehaviorTracker) RecordHeartbeat(pid pp.ID) error {
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
if nb.isBanned() {
return errors.New("peer is banned")
}
if !nb.recordInWindow(&nb.hbTimes, t.maxHB) {
return errors.New("heartbeat flood detected")
}
return nil
}
// CheckIdentity verifies that the DID associated with a PeerID never changes.
// A DID change is a strong signal of identity spoofing.
func (t *NodeBehaviorTracker) CheckIdentity(pid pp.ID, did string) error {
if did == "" {
return nil
}
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
if nb.knownDID == "" {
nb.knownDID = did
return nil
}
if nb.knownDID != did {
nb.strike(2) // identity change is severe
return errors.New("DID mismatch for peer " + pid.String())
}
return nil
}
// RecordBadSignature registers a cryptographic verification failure.
// A single bad signature is worth 2 strikes (near-immediate ban).
func (t *NodeBehaviorTracker) RecordBadSignature(pid pp.ID) {
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
nb.strike(2)
}
// RecordPublish checks publish volume. Returns an error if the peer is
// sending too many publish requests.
func (t *NodeBehaviorTracker) RecordPublish(pid pp.ID) error {
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
if nb.isBanned() {
return errors.New("peer is banned")
}
if !nb.recordInWindow(&nb.pubTimes, t.maxPub) {
return errors.New("publish volume exceeded")
}
return nil
}
// RecordGet checks get volume. Returns an error if the peer is enumerating
// the DHT at an abnormal rate.
func (t *NodeBehaviorTracker) RecordGet(pid pp.ID) error {
nb := t.get(pid)
nb.mu.Lock()
defer nb.mu.Unlock()
if nb.isBanned() {
return errors.New("peer is banned")
}
if !nb.recordInWindow(&nb.getTimes, t.maxGet) {
return errors.New("get volume exceeded")
}
return nil
}
// Cleanup removes the behavior entry for a peer if it is not currently banned.
// Called when the peer is evicted from StreamRecords by the GC.
func (t *NodeBehaviorTracker) Cleanup(pid pp.ID) {
t.mu.RLock()
nb := t.nodes[pid]
t.mu.RUnlock()
if nb == nil {
return
}
nb.mu.Lock()
banned := nb.isBanned()
nb.mu.Unlock()
if !banned {
t.mu.Lock()
delete(t.nodes, pid)
t.mu.Unlock()
}
}
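// Strike economics with the constants above (strikeThreshold = 3, banDuration = 10m):
// exceeding any sliding window (heartbeat, publish, get) costs 1 strike, while a
// DID change or a bad signature costs 2. Three window violations, or one severe
// offence plus one violation, therefore trigger a 10-minute ban. Cleanup keeps
// banned peers in memory so the ban survives eviction from StreamRecords.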

View File

@@ -5,9 +5,10 @@ import (
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"oc-discovery/conf"
"io"
"math/rand"
"oc-discovery/daemons/node/common"
"strings"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
@@ -16,59 +17,45 @@ import (
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
"github.com/libp2p/go-libp2p/core/peer"
lpp "github.com/libp2p/go-libp2p/core/peer"
)
type PeerRecord struct {
type PeerRecordPayload struct {
Name string `json:"name"`
DID string `json:"did"` // real PEER ID
PeerID string `json:"peer_id"`
DID string `json:"did"`
PubKey []byte `json:"pub_key"`
ExpiryDate time.Time `json:"expiry_date"`
}
type PeerRecord struct {
PeerRecordPayload
PeerID string `json:"peer_id"`
APIUrl string `json:"api_url"`
StreamAddress string `json:"stream_address"`
NATSAddress string `json:"nats_address"`
WalletAddress string `json:"wallet_address"`
Signature []byte `json:"signature"`
ExpiryDate time.Time `json:"expiry_date"`
TTL int `json:"ttl"` // max of hop diffusion
NoPub bool `json:"no_pub"`
}
func (p *PeerRecord) Sign() error {
priv, err := common.LoadKeyFromFilePrivate()
priv, err := tools.LoadKeyFromFilePrivate()
if err != nil {
return err
}
dht := PeerRecord{
Name: p.Name,
DID: p.DID,
PubKey: p.PubKey,
ExpiryDate: p.ExpiryDate,
}
payload, _ := json.Marshal(dht)
payload, _ := json.Marshal(p.PeerRecordPayload)
b, err := common.Sign(priv, payload)
p.Signature = b
return err
}
func (p *PeerRecord) Verify() (crypto.PubKey, error) {
fmt.Println(p.PubKey)
pubKey, err := crypto.UnmarshalPublicKey(p.PubKey) // retrieve pub key in message
if err != nil {
fmt.Println("UnmarshalPublicKey")
return pubKey, err
}
dht := PeerRecord{
Name: p.Name,
DID: p.DID,
PubKey: p.PubKey,
ExpiryDate: p.ExpiryDate,
}
payload, _ := json.Marshal(dht)
payload, _ := json.Marshal(p.PeerRecordPayload)
if ok, _ := common.Verify(pubKey, payload, p.Signature); !ok { // verify minimal message was sign per pubKey
fmt.Println("Verify")
if ok, _ := pubKey.Verify(payload, p.Signature); !ok { // verify minimal message was sign per pubKey
return pubKey, errors.New("invalid signature")
}
return pubKey, nil
@@ -79,7 +66,6 @@ func (pr *PeerRecord) ExtractPeer(ourkey string, key string, pubKey crypto.PubKe
if err != nil {
return false, nil, err
}
fmt.Println("ExtractPeer MarshalPublicKey")
rel := pp.NONE
if ourkey == key { // the PeerID is our own, so this record describes our own peer
rel = pp.SELF
@@ -90,7 +76,6 @@ func (pr *PeerRecord) ExtractPeer(ourkey string, key string, pubKey crypto.PubKe
UUID: pr.DID,
Name: pr.Name,
},
State: pp.ONLINE,
Relation: rel, // VERIFY: does not overwrite anything
PeerID: pr.PeerID,
PublicKey: base64.StdEncoding.EncodeToString(pubBytes),
@@ -99,20 +84,7 @@ func (pr *PeerRecord) ExtractPeer(ourkey string, key string, pubKey crypto.PubKe
NATSAddress: pr.NATSAddress,
WalletAddress: pr.WalletAddress,
}
if time.Now().UTC().After(pr.ExpiryDate) { // is expired
p.State = pp.OFFLINE // then is considers OFFLINE
}
b, err := json.Marshal(p)
if err != nil {
return pp.SELF == p.Relation, nil, err
}
go tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.PEER,
Method: int(tools.CREATE_PEER),
Payload: b,
})
if p.State == pp.OFFLINE {
if time.Now().UTC().After(pr.ExpiryDate) {
return pp.SELF == p.Relation, nil, errors.New("peer " + key + " is offline")
}
return pp.SELF == p.Relation, p, nil
@@ -120,6 +92,7 @@ func (pr *PeerRecord) ExtractPeer(ourkey string, key string, pubKey crypto.PubKe
type GetValue struct {
Key string `json:"key"`
PeerID string `json:"peer_id,omitempty"`
}
type GetResponse struct {
@@ -127,162 +100,236 @@ type GetResponse struct {
Records map[string]PeerRecord `json:"records,omitempty"`
}
func (ix *IndexerService) genKey(did string) string {
return "/node/" + did
}
func (ix *IndexerService) genPIDKey(peerID string) string {
return "/pid/" + peerID
}
func (ix *IndexerService) initNodeHandler() {
ix.Host.SetStreamHandler(common.ProtocolHeartbeat, ix.HandleNodeHeartbeat)
logger := oclib.GetLogger()
logger.Info().Msg("Init Node Handler")
// Each heartbeat from a node carries a freshly signed PeerRecord.
// Republish it to the DHT so the record never expires as long as the node
// is alive — no separate publish stream needed from the node side.
ix.AfterHeartbeat = func(hb *common.Heartbeat) {
// Priority 1: use the fresh signed PeerRecord embedded in the heartbeat.
// Each heartbeat tick, the node re-signs with ExpiryDate = now+2min, so
// this record is always fresh. Fetching from DHT would give a stale expiry.
var rec PeerRecord
if len(hb.Record) > 0 {
if err := json.Unmarshal(hb.Record, &rec); err != nil {
logger.Warn().Err(err).Msg("indexer: heartbeat embedded record unmarshal failed")
return
}
} else {
// Fallback: node didn't embed a record yet (first heartbeat before claimInfo).
// Fetch from DHT using the DID resolved by HandleHeartbeat.
ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
res, err := ix.DHT.GetValue(ctx2, ix.genKey(hb.DID))
cancel2()
if err != nil {
logger.Warn().Err(err).Str("did", hb.DID).Msg("indexer: DHT fetch for refresh failed")
return
}
if err := json.Unmarshal(res, &rec); err != nil {
logger.Warn().Err(err).Str("did", hb.DID).Msg("indexer: heartbeat record unmarshal failed")
return
}
}
if _, err := rec.Verify(); err != nil {
logger.Warn().Err(err).Str("did", rec.DID).Msg("indexer: heartbeat record signature invalid")
return
}
data, err := json.Marshal(rec)
if err != nil {
return
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
logger.Info().Msg("REFRESH PutValue " + ix.genKey(rec.DID))
if err := ix.DHT.PutValue(ctx, ix.genKey(rec.DID), data); err != nil {
logger.Warn().Err(err).Str("did", rec.DID).Msg("indexer: DHT refresh failed")
cancel()
return
}
cancel()
if rec.PeerID != "" {
ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
ix.DHT.PutValue(ctx2, ix.genPIDKey(rec.PeerID), []byte(rec.DID))
cancel2()
}
}
ix.Host.SetStreamHandler(common.ProtocolHeartbeat, ix.HandleHeartbeat)
ix.Host.SetStreamHandler(common.ProtocolPublish, ix.handleNodePublish)
ix.Host.SetStreamHandler(common.ProtocolGet, ix.handleNodeGet)
ix.Host.SetStreamHandler(common.ProtocolIndexerCandidates, ix.handleCandidateRequest)
ix.initSearchHandlers()
}
// handleCandidateRequest responds to a node's consensus candidate request.
// Returns a random sample of indexers from the local DHT cache.
func (ix *IndexerService) handleCandidateRequest(s network.Stream) {
defer s.Close()
s.SetDeadline(time.Now().Add(5 * time.Second))
var req common.IndexerCandidatesRequest
if err := json.NewDecoder(s).Decode(&req); err != nil {
return
}
if req.Count <= 0 || req.Count > 10 {
req.Count = 3
}
ix.dhtCacheMu.RLock()
cache := make([]dhtCacheEntry, len(ix.dhtCache))
copy(cache, ix.dhtCache)
ix.dhtCacheMu.RUnlock()
// Shuffle for randomness: each voter offers a different subset.
rand.Shuffle(len(cache), func(i, j int) { cache[i], cache[j] = cache[j], cache[i] })
candidates := make([]lpp.AddrInfo, 0, req.Count)
for _, e := range cache {
if len(candidates) >= req.Count {
break
}
candidates = append(candidates, e.AI)
}
json.NewEncoder(s).Encode(common.IndexerCandidatesResponse{Candidates: candidates})
}
func (ix *IndexerService) handleNodePublish(s network.Stream) {
defer s.Close()
logger := oclib.GetLogger()
remotePeer := s.Conn().RemotePeer()
if err := ix.behavior.RecordPublish(remotePeer); err != nil {
logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("publish refused")
s.Reset()
return
}
for {
var rec PeerRecord
if err := json.NewDecoder(s).Decode(&rec); err != nil {
continue
}
rec2 := PeerRecord{
Name: rec.Name,
DID: rec.DID, // REAL PEER ID
PubKey: rec.PubKey,
PeerID: rec.PeerID,
}
if _, err := rec2.Verify(); err != nil {
logger.Err(err)
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
strings.Contains(err.Error(), "reset") ||
strings.Contains(err.Error(), "closed") ||
strings.Contains(err.Error(), "too many connections") {
return
}
continue
}
if rec.PeerID == "" || rec.ExpiryDate.Before(time.Now().UTC()) { // already expired
logger.Warn().Msg(rec.PeerID + " is expired.")
continue
if _, err := rec.Verify(); err != nil {
ix.behavior.RecordBadSignature(remotePeer)
logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("bad signature on publish")
return
}
pid, err := peer.Decode(rec.PeerID)
if err := ix.behavior.CheckIdentity(remotePeer, rec.DID); err != nil {
logger.Warn().Err(err).Msg("identity mismatch on publish")
s.Reset()
return
}
if rec.PeerID == "" || rec.ExpiryDate.Before(time.Now().UTC()) {
logger.Err(errors.New(rec.PeerID + " is expired."))
return
}
pid, err := lpp.Decode(rec.PeerID)
if err != nil {
continue
return
}
ix.StreamMU.Lock()
if ix.StreamRecords[common.ProtocolPublish] == nil {
ix.StreamRecords[common.ProtocolPublish] = map[peer.ID]*common.StreamRecord[PeerRecord]{}
defer ix.StreamMU.Unlock()
if ix.StreamRecords[common.ProtocolHeartbeat] == nil {
ix.StreamRecords[common.ProtocolHeartbeat] = map[lpp.ID]*common.StreamRecord[PeerRecord]{}
}
streams := ix.StreamRecords[common.ProtocolPublish]
streams := ix.StreamRecords[common.ProtocolHeartbeat]
if srec, ok := streams[pid]; ok {
fmt.Println("UPDATE PUBLISH", pid)
srec.DID = rec.DID
srec.Record = rec
srec.LastSeen = time.Now().UTC()
} else {
fmt.Println("CREATE PUBLISH", pid)
streams[pid] = &common.StreamRecord[PeerRecord]{ // HeartBeat wil
DID: rec.DID,
Record: rec,
LastSeen: time.Now().UTC(),
}
srec.HeartbeatStream.UptimeTracker.LastSeen = time.Now().UTC()
}
if ix.LongLivedPubSubs[common.TopicPubSubNodeActivity] != nil && !rec.NoPub {
ad, err := peer.AddrInfoFromString("/ip4/" + conf.GetConfig().Hostname + " /tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + " /p2p/" + ix.Host.ID().String())
if err == nil {
if b, err := json.Marshal(common.TopicNodeActivityPub{
Disposer: *ad,
DID: rec.DID,
Name: rec.Name,
PeerID: pid.String(),
NodeActivity: pp.ONLINE,
}); err == nil {
ix.LongLivedPubSubs[common.TopicPubSubNodeActivity].Publish(context.Background(), b)
}
key := ix.genKey(rec.DID)
data, err := json.Marshal(rec)
if err != nil {
logger.Err(err)
return
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
if err := ix.DHT.PutValue(ctx, key, data); err != nil {
logger.Err(err)
cancel()
return
}
cancel()
if rec.TTL > 0 {
rec.NoPub = true
for _, ad := range common.StaticIndexers {
if ad.ID == s.Conn().RemotePeer() {
continue
// Secondary index: /pid/<peerID> → DID, so peers can resolve by libp2p PeerID.
if rec.PeerID != "" {
ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
if err := ix.DHT.PutValue(ctx2, ix.genPIDKey(rec.PeerID), []byte(rec.DID)); err != nil {
logger.Err(err).Str("pid", rec.PeerID).Msg("indexer: failed to write pid index")
}
if common.StreamIndexers[common.ProtocolPublish][ad.ID] == nil {
continue
cancel2()
}
stream := common.StreamIndexers[common.ProtocolPublish][ad.ID]
rec.TTL -= 1
if err := json.NewEncoder(stream.Stream).Encode(&rec); err != nil { // then publish on stream
continue
}
}
}
ix.StreamMU.Unlock()
return
}
}
func (ix *IndexerService) handleNodeGet(s network.Stream) {
defer s.Close()
logger := oclib.GetLogger()
remotePeer := s.Conn().RemotePeer()
if err := ix.behavior.RecordGet(remotePeer); err != nil {
logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("get refused")
s.Reset()
return
}
for {
var req GetValue
if err := json.NewDecoder(s).Decode(&req); err != nil {
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
strings.Contains(err.Error(), "reset") ||
strings.Contains(err.Error(), "closed") ||
strings.Contains(err.Error(), "too many connections") {
return
}
logger.Err(err)
continue
}
ix.StreamMU.Lock()
if ix.StreamRecords[common.ProtocolGet] == nil {
ix.StreamRecords[common.ProtocolGet] = map[peer.ID]*common.StreamRecord[PeerRecord]{}
resp := GetResponse{Found: false, Records: map[string]PeerRecord{}}
// Resolve DID key: by PeerID (secondary /pid/ index) or direct DID key.
var key string
if req.PeerID != "" {
pidCtx, pidCancel := context.WithTimeout(context.Background(), 5*time.Second)
did, err := ix.DHT.GetValue(pidCtx, ix.genPIDKey(req.PeerID))
pidCancel()
if err == nil {
key = string(did)
}
resp := GetResponse{
Found: false,
Records: map[string]PeerRecord{},
} else {
key = req.Key
}
streams := ix.StreamRecords[common.ProtocolPublish]
// simple lookup by PeerID (or DID)
for _, rec := range streams {
if rec.Record.DID == req.Key || rec.Record.PeerID == req.Key || rec.Record.Name == req.Key { // OK
resp.Found = true
resp.Records[rec.Record.PeerID] = rec.Record
if rec.Record.DID == req.Key || rec.Record.PeerID == req.Key { // these are unique... no need to search further
if key != "" {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
c, err := ix.DHT.GetValue(ctx, ix.genKey(key))
cancel()
if err == nil {
var rec PeerRecord
if json.Unmarshal(c, &rec) == nil {
resp.Records[rec.PeerID] = rec
}
} else {
logger.Err(err).Msg("Failed to fetch PeerRecord from DHT " + key)
}
}
resp.Found = len(resp.Records) > 0
_ = json.NewEncoder(s).Encode(resp)
ix.StreamMU.Unlock()
return
}
continue
}
}
// if not found, ask my neighbor indexers
for pid, dsp := range ix.DisposedPeers {
if _, ok := resp.Records[dsp.PeerID]; !ok && (dsp.Name == req.Key || dsp.DID == req.Key || dsp.PeerID == req.Key) {
ctxTTL, cancel := context.WithTimeout(context.Background(), 120*time.Second)
defer cancel()
if ix.Host.Network().Connectedness(pid) != network.Connected {
_ = ix.Host.Connect(ctxTTL, dsp.Disposer)
str, err := ix.Host.NewStream(ctxTTL, pid, common.ProtocolGet)
if err != nil {
continue
}
for {
if ctxTTL.Err() == context.DeadlineExceeded {
break
}
var subResp GetResponse
if err := json.NewDecoder(str).Decode(&resp); err != nil {
continue
}
if subResp.Found {
for k, v := range subResp.Records {
if _, ok := resp.Records[k]; !ok {
resp.Records[k] = v
}
}
break
}
}
}
}
}
// Not found
_ = json.NewEncoder(s).Encode(resp)
ix.StreamMU.Unlock()
}
}

View File

@@ -0,0 +1,228 @@
package indexer
import (
"context"
"encoding/json"
"strings"
"time"
"oc-discovery/conf"
"oc-discovery/daemons/node/common"
oclib "cloud.o-forge.io/core/oc-lib"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/network"
)
const TopicSearchPeer = "oc-search-peer"
// searchTimeout returns the configured search timeout, defaulting to 5s.
func searchTimeout() time.Duration {
if t := conf.GetConfig().SearchTimeout; t > 0 {
return time.Duration(t) * time.Second
}
return 5 * time.Second
}
// initSearchHandlers registers ProtocolSearchPeer and ProtocolSearchPeerResponse
// and subscribes to TopicSearchPeer on GossipSub.
func (ix *IndexerService) initSearchHandlers() {
ix.Host.SetStreamHandler(common.ProtocolSearchPeer, ix.handleSearchPeer)
ix.Host.SetStreamHandler(common.ProtocolSearchPeerResponse, ix.handleSearchPeerResponse)
ix.initSearchSubscription()
}
// updateReferent is called from HandleHeartbeat when Referent flag changes.
// If referent=true the node is added to referencedNodes; if false it is removed.
func (ix *IndexerService) updateReferent(pid pp.ID, rec PeerRecord, referent bool) {
ix.referencedNodesMu.Lock()
defer ix.referencedNodesMu.Unlock()
if referent {
ix.referencedNodes[pid] = rec
} else {
delete(ix.referencedNodes, pid)
}
}
// searchReferenced looks up nodes in referencedNodes matching the query.
// Matches on peerID (exact), DID (exact), or name (case-insensitive contains).
func (ix *IndexerService) searchReferenced(peerID, did, name string) []common.SearchHit {
ix.referencedNodesMu.RLock()
defer ix.referencedNodesMu.RUnlock()
nameLow := strings.ToLower(name)
var hits []common.SearchHit
for pid, rec := range ix.referencedNodes {
pidStr := pid.String()
matchPeerID := peerID != "" && pidStr == peerID
matchDID := did != "" && rec.DID == did
matchName := name != "" && strings.Contains(strings.ToLower(rec.Name), nameLow)
if matchPeerID || matchDID || matchName {
hits = append(hits, common.SearchHit{
PeerID: pidStr,
DID: rec.DID,
Name: rec.Name,
})
}
}
return hits
}
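// Illustrative sketch (record values are assumptions) of the matching rules above:
//
//	ix.referencedNodes[pid] = PeerRecord{PeerRecordPayload: PeerRecordPayload{Name: "GridLab-Paris", DID: "3f2c-..."}}
//	ix.searchReferenced("", "", "gridlab")  // 1 hit: name match is a case-insensitive contains
//	ix.searchReferenced("", "3f2c-...", "") // 1 hit: DID matches exactly
//	ix.searchReferenced("", "3f2c", "")     // no hit: DID and PeerID are exact comparisons, not prefixes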
// handleSearchPeer is the ProtocolSearchPeer handler.
// The node opens this stream, sends a SearchPeerRequest, and reads results
// as they stream in. The stream stays open until timeout or node closes it.
func (ix *IndexerService) handleSearchPeer(s network.Stream) {
logger := oclib.GetLogger()
defer s.Reset()
var req common.SearchPeerRequest
if err := json.NewDecoder(s).Decode(&req); err != nil || req.QueryID == "" {
return
}
// streamCtx is cancelled when the node closes its end of the stream.
streamCtx, streamCancel := context.WithCancel(context.Background())
go func() {
// Block until the stream is reset/closed, then cancel our context.
buf := make([]byte, 1)
s.Read(buf) //nolint:errcheck — we only care about EOF/reset
streamCancel()
}()
defer streamCancel()
resultCh := make(chan []common.SearchHit, 16)
ix.pendingSearchesMu.Lock()
ix.pendingSearches[req.QueryID] = resultCh
ix.pendingSearchesMu.Unlock()
defer func() {
ix.pendingSearchesMu.Lock()
delete(ix.pendingSearches, req.QueryID)
ix.pendingSearchesMu.Unlock()
}()
// Check own referencedNodes immediately.
if hits := ix.searchReferenced(req.PeerID, req.DID, req.Name); len(hits) > 0 {
resultCh <- hits
}
// Broadcast search on GossipSub so other indexers can respond.
ix.publishSearchQuery(req.QueryID, req.PeerID, req.DID, req.Name)
// Stream results back to node as they arrive; reset idle timer on each result.
enc := json.NewEncoder(s)
idleTimer := time.NewTimer(searchTimeout())
defer idleTimer.Stop()
for {
select {
case hits := <-resultCh:
if err := enc.Encode(common.SearchPeerResult{QueryID: req.QueryID, Records: hits}); err != nil {
logger.Debug().Err(err).Msg("[search] stream write failed")
return
}
// Reset idle timeout: keep alive as long as results trickle in.
if !idleTimer.Stop() {
select {
case <-idleTimer.C:
default:
}
}
idleTimer.Reset(searchTimeout())
case <-idleTimer.C:
// No new result within timeout — close gracefully.
return
case <-streamCtx.Done():
// Node closed the stream (new search superseded this one).
return
}
}
}
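// Node-side sketch of the exchange served above (an already-open stream s to an
// indexer and a query id qid are assumed; SearchPeerRecord further below is the
// real client implementation):
//
//	_ = json.NewEncoder(s).Encode(common.SearchPeerRequest{QueryID: qid, Name: "gridlab"})
//	dec := json.NewDecoder(s)
//	for {
//		var res common.SearchPeerResult
//		if err := dec.Decode(&res); err != nil {
//			break // indexer closed the stream after its idle timeout
//		}
//		for _, hit := range res.Records {
//			fmt.Println(hit.PeerID, hit.Name)
//		}
//	}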
// handleSearchPeerResponse is the ProtocolSearchPeerResponse handler.
// Another indexer opens this stream to deliver hits for a pending queryID.
func (ix *IndexerService) handleSearchPeerResponse(s network.Stream) {
defer s.Reset()
var result common.SearchPeerResult
if err := json.NewDecoder(s).Decode(&result); err != nil || result.QueryID == "" {
return
}
ix.pendingSearchesMu.Lock()
ch := ix.pendingSearches[result.QueryID]
ix.pendingSearchesMu.Unlock()
if ch != nil {
select {
case ch <- result.Records:
default: // channel full, drop — node may be slow
}
}
}
// publishSearchQuery broadcasts a SearchQuery on TopicSearchPeer.
func (ix *IndexerService) publishSearchQuery(queryID, peerID, did, name string) {
ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.RLock()
topic := ix.LongLivedStreamRecordedService.LongLivedPubSubService.LongLivedPubSubs[TopicSearchPeer]
ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.RUnlock()
if topic == nil {
return
}
q := common.SearchQuery{
QueryID: queryID,
PeerID: peerID,
DID: did,
Name: name,
EmitterID: ix.Host.ID().String(),
}
b, err := json.Marshal(q)
if err != nil {
return
}
_ = topic.Publish(context.Background(), b)
}
// initSearchSubscription joins TopicSearchPeer and dispatches incoming queries.
func (ix *IndexerService) initSearchSubscription() {
logger := oclib.GetLogger()
ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Lock()
topic, err := ix.PS.Join(TopicSearchPeer)
if err != nil {
ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Unlock()
logger.Err(err).Msg("[search] failed to join search topic")
return
}
ix.LongLivedStreamRecordedService.LongLivedPubSubService.LongLivedPubSubs[TopicSearchPeer] = topic
ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Unlock()
common.SubscribeEvents(
ix.LongLivedStreamRecordedService.LongLivedPubSubService,
context.Background(),
TopicSearchPeer,
-1,
func(_ context.Context, q common.SearchQuery, _ string) {
ix.onSearchQuery(q)
},
)
}
// onSearchQuery handles an incoming GossipSub search broadcast.
// If we have matching referencedNodes, we respond to the emitting indexer.
func (ix *IndexerService) onSearchQuery(q common.SearchQuery) {
// Don't respond to our own broadcasts.
if q.EmitterID == ix.Host.ID().String() {
return
}
hits := ix.searchReferenced(q.PeerID, q.DID, q.Name)
if len(hits) == 0 {
return
}
emitterID, err := pp.Decode(q.EmitterID)
if err != nil {
return
}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
s, err := ix.Host.NewStream(ctx, emitterID, common.ProtocolSearchPeerResponse)
if err != nil {
return
}
defer s.Reset()
s.SetDeadline(time.Now().Add(5 * time.Second))
json.NewEncoder(s).Encode(common.SearchPeerResult{QueryID: q.QueryID, Records: hits})
}

View File

@@ -2,69 +2,491 @@ package indexer
import (
"context"
"encoding/json"
"errors"
"math/rand"
"oc-discovery/conf"
"oc-discovery/daemons/node/common"
"strings"
"sync"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
pp "cloud.o-forge.io/core/oc-lib/models/peer"
dht "github.com/libp2p/go-libp2p-kad-dht"
pubsub "github.com/libp2p/go-libp2p-pubsub"
record "github.com/libp2p/go-libp2p-record"
"github.com/libp2p/go-libp2p/core/host"
"github.com/libp2p/go-libp2p/core/peer"
pp "github.com/libp2p/go-libp2p/core/peer"
)
// Index Record is the model for the specialized registry of node connected to Indexer
// dhtCacheEntry holds one indexer discovered via DHT for use in suggestion responses.
type dhtCacheEntry struct {
AI pp.AddrInfo
LastSeen time.Time
}
// offloadState tracks which nodes we've already proposed migration to.
// When an indexer is overloaded (fill rate > offloadThreshold) it only sends
// SuggestMigrate to a small batch at a time; peers that don't migrate within
// offloadGracePeriod are moved to alreadyTried so a new batch can be picked.
type offloadState struct {
inBatch map[pp.ID]time.Time // peer → time added to current batch
alreadyTried map[pp.ID]struct{} // peers proposed to that didn't migrate
mu sync.Mutex
}
const (
offloadThreshold = 0.80 // fill rate above which to start offloading
offloadBatchSize = 5 // max concurrent "please migrate" proposals
offloadGracePeriod = 3 * common.RecommendedHeartbeatInterval
)
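// Worked example (numbers are assumptions): with MaxNodes = 500 and 410 live
// heartbeat streams the fill rate is 410/500 = 0.82 > offloadThreshold, so at
// most offloadBatchSize (5) peers hold a migration proposal at once; a peer still
// connected after offloadGracePeriod (3 heartbeat intervals) is moved to
// alreadyTried and its batch slot is freed for the next candidate.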
// IndexerService manages the indexer node's state: stream records, DHT, pubsub.
type IndexerService struct {
*common.LongLivedStreamRecordedService[PeerRecord]
PS *pubsub.PubSub
DHT *dht.IpfsDHT
isStrictIndexer bool
mu sync.RWMutex
DisposedPeers map[peer.ID]*common.TopicNodeActivityPub
dhtProvideCancel context.CancelFunc
bornAt time.Time
// Passive DHT cache: refreshed every 2 min in background, used for suggestions.
dhtCache []dhtCacheEntry
dhtCacheMu sync.RWMutex
// Offload state for overloaded-indexer migration proposals.
offload offloadState
// referencedNodes holds nodes that have designated this indexer as their
// search referent (Heartbeat.Referent=true). Used for distributed search.
referencedNodes map[pp.ID]PeerRecord
referencedNodesMu sync.RWMutex
// pendingSearches maps queryID → result channel for in-flight searches.
pendingSearches map[string]chan []common.SearchHit
pendingSearchesMu sync.Mutex
// behavior tracks per-node compliance (heartbeat rate, publish/get volume,
// identity consistency, signature failures).
behavior *NodeBehaviorTracker
// connGuard limits new-connection bursts to protect public indexers.
connGuard *ConnectionRateGuard
}
// if a pubsub is given... indexer is also an active oc-node. If not... your a strict indexer
// NewIndexerService creates an IndexerService.
// If ps is nil, this is a strict indexer (no pre-existing gossip sub from a node).
func NewIndexerService(h host.Host, ps *pubsub.PubSub, maxNode int) *IndexerService {
logger := oclib.GetLogger()
logger.Info().Msg("open indexer mode...")
var err error
ix := &IndexerService{
LongLivedStreamRecordedService: common.NewStreamRecordedService[PeerRecord](h, maxNode, false),
LongLivedStreamRecordedService: common.NewStreamRecordedService[PeerRecord](h, maxNode),
isStrictIndexer: ps == nil,
referencedNodes: map[pp.ID]PeerRecord{},
pendingSearches: map[string]chan []common.SearchHit{},
behavior: newNodeBehaviorTracker(),
connGuard: newConnectionRateGuard(),
}
if ps == nil { // generate your fresh gossip for the flow of killed node... EVERYBODY should know !
if ps == nil {
ps, err = pubsub.NewGossipSub(context.Background(), ix.Host)
if err != nil {
panic(err) // can't run your indexer without a propalgation pubsub, of state of node.
panic(err) // can't run your indexer without a propagation pubsub
}
}
ix.PS = ps
// later TODO: every indexer launches a private replica of itself. DEV OPS
if ix.isStrictIndexer {
logger.Info().Msg("connect to indexers as strict indexer...")
common.ConnectToIndexers(h, 0, 5, ix.Host.ID()) // TODO : make var to change how many indexers are allowed.
logger.Info().Msg("subscribe to node activity as strict indexer...")
common.ConnectToIndexers(h, conf.GetConfig().MinIndexer, conf.GetConfig().MaxIndexer*2)
logger.Info().Msg("subscribe to decentralized search flow as strict indexer...")
ix.SubscribeToSearch(ix.PS, nil)
go ix.SubscribeToSearch(ix.PS, nil)
}
f := func(ctx context.Context, evt common.TopicNodeActivityPub, _ string) {
ix.mu.Lock()
if evt.NodeActivity == pp.OFFLINE {
delete(ix.DisposedPeers, evt.Disposer.ID)
ix.LongLivedStreamRecordedService.AfterDelete = func(pid pp.ID, name, did string) {
// Remove behavior state for peers that are no longer connected and
// have no active ban — keeps memory bounded to the live node set.
ix.behavior.Cleanup(pid)
}
if evt.NodeActivity == pp.ONLINE {
ix.DisposedPeers[evt.Disposer.ID] = &evt
// AllowInbound: fired once per stream open, before any heartbeat is decoded.
// 1. Reject peers that are currently banned (behavioral strikes).
// 2. For genuinely new connections, apply the burst guard.
ix.AllowInbound = func(remotePeer pp.ID, isNew bool) error {
if ix.behavior.IsBanned(remotePeer) {
return errors.New("peer is banned")
}
ix.mu.Unlock()
if isNew && !ix.connGuard.Allow() {
return errors.New("connection rate limit exceeded, retry later")
}
ix.SubscribeToNodeActivity(ix.PS, &f) // now we subscribe to a long run topic named node-activity, to relay message.
ix.initNodeHandler() // then listen up on every protocol expected
return nil
}
// ValidateHeartbeat: fired on every heartbeat tick for an established stream.
// Checks heartbeat cadence — rejects if the node is sending too fast.
ix.ValidateHeartbeat = func(remotePeer pp.ID) error {
return ix.behavior.RecordHeartbeat(remotePeer)
}
// Parse bootstrap peers from configured indexer addresses so the DHT can
// find its routing table entries even in a fresh deployment.
var bootstrapPeers []pp.AddrInfo
for _, addrStr := range strings.Split(conf.GetConfig().IndexerAddresses, ",") {
addrStr = strings.TrimSpace(addrStr)
if addrStr == "" {
continue
}
if ad, err := pp.AddrInfoFromString(addrStr); err == nil {
bootstrapPeers = append(bootstrapPeers, *ad)
}
}
dhtOpts := []dht.Option{
dht.Mode(dht.ModeServer),
dht.ProtocolPrefix("oc"),
dht.Validator(record.NamespacedValidator{
"node": PeerRecordValidator{},
"name": DefaultValidator{},
"pid": DefaultValidator{},
}),
}
if len(bootstrapPeers) > 0 {
dhtOpts = append(dhtOpts, dht.BootstrapPeers(bootstrapPeers...))
}
if ix.DHT, err = dht.New(context.Background(), ix.Host, dhtOpts...); err != nil {
logger.Info().Msg(err.Error())
return nil
}
// Make the DHT available for replenishment from other packages.
common.SetDiscoveryDHT(ix.DHT)
ix.bornAt = time.Now().UTC()
ix.offload.inBatch = make(map[pp.ID]time.Time)
ix.offload.alreadyTried = make(map[pp.ID]struct{})
ix.initNodeHandler()
// Build and send a HeartbeatResponse after each received node heartbeat.
// Raw metrics only — no pre-cooked score. Node computes the score itself.
ix.BuildHeartbeatResponse = func(remotePeer pp.ID, need int, challenges []string, challengeDID string, referent bool) *common.HeartbeatResponse {
ix.StreamMU.RLock()
peerCount := len(ix.StreamRecords[common.ProtocolHeartbeat])
// Collect lastSeen per active peer for challenge responses.
type peerMeta struct {
found bool
lastSeen time.Time
}
peerLookup := make(map[string]peerMeta, peerCount)
var remotePeerRecord PeerRecord
for pid, rec := range ix.StreamRecords[common.ProtocolHeartbeat] {
var ls time.Time
if rec.HeartbeatStream != nil && rec.HeartbeatStream.UptimeTracker != nil {
ls = rec.HeartbeatStream.UptimeTracker.LastSeen
}
peerLookup[pid.String()] = peerMeta{found: true, lastSeen: ls}
if pid == remotePeer {
remotePeerRecord = rec.Record
}
}
ix.StreamMU.RUnlock()
// Update referent designation: node marks its best-scored indexer with Referent=true.
ix.updateReferent(remotePeer, remotePeerRecord, referent)
maxN := ix.MaxNodesConn()
fillRate := 0.0
if maxN > 0 {
fillRate = float64(peerCount) / float64(maxN)
if fillRate > 1 {
fillRate = 1
}
}
resp := &common.HeartbeatResponse{
FillRate: fillRate,
PeerCount: peerCount,
MaxNodes: maxN,
BornAt: ix.bornAt,
}
// Answer each challenged PeerID with raw found + lastSeen.
for _, pidStr := range challenges {
meta := peerLookup[pidStr] // zero value if not found
entry := common.ChallengeEntry{
PeerID: pidStr,
Found: meta.found,
LastSeen: meta.lastSeen,
}
resp.Challenges = append(resp.Challenges, entry)
}
// DHT challenge: retrieve the node's own DID to prove DHT is functional.
if challengeDID != "" {
ctx3, cancel3 := context.WithTimeout(context.Background(), 3*time.Second)
val, err := ix.DHT.GetValue(ctx3, "/node/"+challengeDID)
cancel3()
resp.DHTFound = err == nil
if err == nil {
resp.DHTPayload = json.RawMessage(val)
}
}
// Random sample of connected nodes as witnesses (up to 3).
// Never include the requesting peer itself — asking a node to witness
// itself is circular and meaningless.
ix.StreamMU.RLock()
for pidStr := range peerLookup {
if len(resp.Witnesses) >= 3 {
break
}
pid, err := pp.Decode(pidStr)
if err != nil || pid == remotePeer || pid == ix.Host.ID() {
continue
}
addrs := ix.Host.Peerstore().Addrs(pid)
ai := common.FilterLoopbackAddrs(pp.AddrInfo{ID: pid, Addrs: addrs})
if len(ai.Addrs) > 0 {
resp.Witnesses = append(resp.Witnesses, ai)
}
}
ix.StreamMU.RUnlock()
// Attach suggestions: exactly `need` entries from the DHT cache.
// If the indexer is overloaded (SuggestMigrate will be set below), always
// provide at least 1 suggestion even when need == 0, so the node has
// somewhere to go.
suggestionsNeeded := need
if fillRate > offloadThreshold && suggestionsNeeded < 1 {
suggestionsNeeded = 1
}
if suggestionsNeeded > 0 {
ix.dhtCacheMu.RLock()
// When offloading, pick from a random offset within the top N of the
// cache so concurrent migrations spread across multiple targets rather
// than all rushing to the same least-loaded indexer (thundering herd).
// For normal need-based suggestions the full sorted order is fine.
cache := ix.dhtCache
if fillRate > offloadThreshold && len(cache) > suggestionsNeeded {
const spreadWindow = 5 // sample from the top-5 least-loaded
window := spreadWindow
if window > len(cache) {
window = len(cache)
}
start := rand.Intn(window)
cache = cache[start:]
}
for _, e := range cache {
if len(resp.Suggestions) >= suggestionsNeeded {
break
}
// Never suggest the requesting peer itself or this indexer.
if e.AI.ID == remotePeer || e.AI.ID == h.ID() {
continue
}
resp.Suggestions = append(resp.Suggestions, e.AI)
}
ix.dhtCacheMu.RUnlock()
}
// Offload logic: when fill rate is too high, selectively ask nodes to migrate.
if fillRate > offloadThreshold && len(resp.Suggestions) > 0 {
now := time.Now()
ix.offload.mu.Lock()
// Expire stale batch entries -> move to alreadyTried.
for pid, addedAt := range ix.offload.inBatch {
if now.Sub(addedAt) > offloadGracePeriod {
ix.offload.alreadyTried[pid] = struct{}{}
delete(ix.offload.inBatch, pid)
}
}
// Reset alreadyTried if we've exhausted the whole pool.
if len(ix.offload.alreadyTried) >= peerCount {
ix.offload.alreadyTried = make(map[pp.ID]struct{})
}
_, tried := ix.offload.alreadyTried[remotePeer]
_, inBatch := ix.offload.inBatch[remotePeer]
if !tried {
if inBatch {
resp.SuggestMigrate = true
} else if len(ix.offload.inBatch) < offloadBatchSize {
ix.offload.inBatch[remotePeer] = now
resp.SuggestMigrate = true
}
}
ix.offload.mu.Unlock()
} else if fillRate <= offloadThreshold {
// Fill rate back to normal: reset offload state.
ix.offload.mu.Lock()
if len(ix.offload.inBatch) > 0 || len(ix.offload.alreadyTried) > 0 {
ix.offload.inBatch = make(map[pp.ID]time.Time)
ix.offload.alreadyTried = make(map[pp.ID]struct{})
}
ix.offload.mu.Unlock()
}
// Bootstrap: if this indexer has no indexers of its own, probe the
// connecting peer to check it supports ProtocolHeartbeat (i.e. it is
// itself an indexer). Plain nodes do not register the handler and the
// negotiation fails instantly — no wasted heartbeat cycle.
// Run in a goroutine: the probe is a short blocking stream open.
if len(common.Indexers.GetAddrs()) == 0 && remotePeer != h.ID() {
pid := remotePeer
go func() {
if !common.SupportsHeartbeat(h, pid) {
logger.Debug().Str("peer", pid.String()).
Msg("[bootstrap] inbound peer has no heartbeat handler — not an indexer, skipping")
return
}
addrs := h.Peerstore().Addrs(pid)
ai := common.FilterLoopbackAddrs(pp.AddrInfo{ID: pid, Addrs: addrs})
if len(ai.Addrs) == 0 {
return
}
key := pid.String()
if !common.Indexers.ExistsAddr(key) {
adCopy := ai
common.Indexers.SetAddr(key, &adCopy)
common.Indexers.NudgeIt()
logger.Info().Str("peer", key).Msg("[bootstrap] no indexers — added inbound indexer peer as candidate")
}
}()
}
return resp
}
// Advertise this indexer in the DHT so nodes can discover it.
fillRateFn := func() float64 {
ix.StreamMU.RLock()
n := len(ix.StreamRecords[common.ProtocolHeartbeat])
ix.StreamMU.RUnlock()
maxN := ix.MaxNodesConn()
if maxN <= 0 {
return 0
}
rate := float64(n) / float64(maxN)
if rate > 1 {
rate = 1
}
return rate
}
ix.startDHTCacheRefresh()
ix.startDHTProvide(fillRateFn)
return ix
}
// startDHTCacheRefresh periodically queries the DHT for peer indexers and
// refreshes ix.dhtCache. This passive cache is used by BuildHeartbeatResponse
// to suggest better indexers to connected nodes without any per-request cost.
func (ix *IndexerService) startDHTCacheRefresh() {
ctx, cancel := context.WithCancel(context.Background())
// Store cancel alongside the provide cancel so Close() stops both.
prevCancel := ix.dhtProvideCancel
ix.dhtProvideCancel = func() {
if prevCancel != nil {
prevCancel()
}
cancel()
}
go func() {
logger := oclib.GetLogger()
refresh := func() {
if ix.DHT == nil {
return
}
// Fetch more than needed so SelectByFillRate can filter for diversity.
raw := common.DiscoverIndexersFromDHT(ix.Host, ix.DHT, 30)
if len(raw) == 0 {
return
}
// Remove self before selection.
filtered := raw[:0]
for _, ai := range raw {
if ai.ID != ix.Host.ID() {
filtered = append(filtered, ai)
}
}
// SelectByFillRate applies /24 subnet diversity and fill-rate weighting.
// Fill rates are unknown at this stage (nil map) so all peers get
// the neutral prior f=0.5 — diversity filtering still applies.
selected := common.SelectByFillRate(filtered, nil, 10)
now := time.Now()
ix.dhtCacheMu.Lock()
ix.dhtCache = ix.dhtCache[:0]
for _, ai := range selected {
ix.dhtCache = append(ix.dhtCache, dhtCacheEntry{AI: ai, LastSeen: now})
}
ix.dhtCacheMu.Unlock()
logger.Info().Int("cached", len(selected)).Msg("[dht] indexer suggestion cache refreshed")
}
// Initial delay: let the DHT routing table warm up first.
select {
case <-time.After(30 * time.Second):
case <-ctx.Done():
return
}
refresh()
t := time.NewTicker(2 * time.Minute)
defer t.Stop()
for {
select {
case <-t.C:
refresh()
case <-ctx.Done():
return
}
}
}()
}
// startDHTProvide bootstraps the DHT and starts a goroutine that periodically
// advertises this indexer under the well-known provider key.
func (ix *IndexerService) startDHTProvide(fillRateFn func() float64) {
ctx, cancel := context.WithCancel(context.Background())
ix.dhtProvideCancel = cancel
go func() {
logger := oclib.GetLogger()
// Wait until a routable (non-loopback) address is available.
for i := 0; i < 12; i++ {
addrs := ix.Host.Addrs()
if len(addrs) > 0 && !strings.Contains(addrs[len(addrs)-1].String(), "127.0.0.1") {
break
}
select {
case <-ctx.Done():
return
case <-time.After(5 * time.Second):
}
}
if err := ix.DHT.Bootstrap(ctx); err != nil {
logger.Warn().Err(err).Msg("[dht] bootstrap failed")
}
provide := func() {
pCtx, pCancel := context.WithTimeout(ctx, 30*time.Second)
defer pCancel()
if err := ix.DHT.Provide(pCtx, common.IndexerCID(), true); err != nil {
logger.Warn().Err(err).Msg("[dht] Provide failed")
} else {
logger.Info().Float64("fill_rate", fillRateFn()).Msg("[dht] indexer advertised in DHT")
}
}
provide()
t := time.NewTicker(common.RecommendedHeartbeatInterval)
defer t.Stop()
for {
select {
case <-t.C:
provide()
case <-ctx.Done():
return
}
}
}()
}
func (ix *IndexerService) Close() {
if ix.dhtProvideCancel != nil {
ix.dhtProvideCancel()
}
ix.DHT.Close()
ix.PS.UnregisterTopicValidator(common.TopicPubSubSearch)
for _, s := range ix.StreamRecords {
for _, ss := range s {
ss.Stream.Close()
ss.HeartbeatStream.Stream.Close()
}
}

View File

@@ -0,0 +1,64 @@
package indexer
import (
"encoding/json"
"errors"
"time"
)
type DefaultValidator struct{}
func (v DefaultValidator) Validate(key string, value []byte) error {
return nil
}
func (v DefaultValidator) Select(key string, values [][]byte) (int, error) {
return 0, nil
}
type PeerRecordValidator struct{}
func (v PeerRecordValidator) Validate(key string, value []byte) error {
var rec PeerRecord
if err := json.Unmarshal(value, &rec); err != nil {
return errors.New("invalid json")
}
// PeerID must exist
if rec.PeerID == "" {
return errors.New("missing peerID")
}
// Expiry check
if rec.ExpiryDate.Before(time.Now().UTC()) {
return errors.New("record expired")
}
// Signature verification
if _, err := rec.Verify(); err != nil {
return errors.New("invalid signature")
}
return nil
}
func (v PeerRecordValidator) Select(key string, values [][]byte) (int, error) {
var newest time.Time
index := 0
for i, val := range values {
var rec PeerRecord
if err := json.Unmarshal(val, &rec); err != nil {
continue
}
if rec.ExpiryDate.After(newest) {
newest = rec.ExpiryDate
index = i
}
}
return index, nil
}
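// Worked example (timestamps are assumptions): for two values stored under the
// same "/node/<did>" key, one with ExpiryDate 12:00:00 UTC and one with 12:02:00
// UTC, Select returns the index of the 12:02:00 record; values that fail to
// unmarshal are skipped, and on a tie the earliest-indexed candidate wins.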

View File

@@ -4,13 +4,100 @@ import (
"context"
"encoding/json"
"fmt"
"oc-discovery/daemons/node/common"
"oc-discovery/daemons/node/stream"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/config"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
func ListenNATS(n Node) {
type configPayload struct {
PeerID string `json:"source_peer_id"`
}
type executionConsidersPayload struct {
PeerIDs []string `json:"peer_ids"`
}
func ListenNATS(n *Node) {
tools.NewNATSCaller().ListenNats(map[tools.NATSMethod]func(tools.NATSResponse){
/*tools.VERIFY_RESOURCE: func(resp tools.NATSResponse) {
if resp.FromApp == config.GetAppName() {
return
}
if res, err := resources.ToResource(resp.Datatype.EnumIndex(), resp.Payload); err == nil {
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
p := access.LoadOne(res.GetCreatorID())
realP := p.ToPeer()
if realP == nil {
return
} else if realP.Relation == peer.SELF {
pubKey, err := common.PubKeyFromString(realP.PublicKey) // extract pubkey from pubkey str
if err != nil {
return
}
ok, _ := pubKey.Verify(resp.Payload, res.GetSignature())
if b, err := json.Marshal(stream.Verify{
IsVerified: ok,
}); err == nil {
tools.NewNATSCaller().SetNATSPub(tools.VERIFY_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Method: int(tools.VERIFY_RESOURCE),
Payload: b,
})
}
} else if realP.Relation != peer.BLACKLIST {
n.StreamService.PublishVerifyResources(&resp.Datatype, resp.User, realP.PeerID, resp.Payload)
}
}
},*/
tools.CREATE_RESOURCE: func(resp tools.NATSResponse) {
if resp.FromApp == config.GetAppName() && resp.Datatype != tools.PEER && resp.Datatype != tools.WORKFLOW {
return
}
logger := oclib.GetLogger()
m := map[string]interface{}{}
err := json.Unmarshal(resp.Payload, &m)
if err != nil {
logger.Err(err)
return
}
p := &peer.Peer{}
p = p.Deserialize(m, p).(*peer.Peer)
ad, err := pp.AddrInfoFromString(p.StreamAddress)
if err != nil {
return
}
// Non-partner: close any existing streams for this peer.
if p.Relation != peer.PARTNER {
n.StreamService.Mu.Lock()
defer n.StreamService.Mu.Unlock()
ps := common.ProtocolStream{}
for proto, s := range n.StreamService.Streams {
m := map[pp.ID]*common.Stream{}
for k := range s {
if ad.ID != k {
m[k] = s[k]
} else {
s[k].Stream.Close()
}
}
ps[proto] = m
}
n.StreamService.Streams = ps
}
},
tools.PROPALGATION_EVENT: func(resp tools.NATSResponse) {
fmt.Println("PROPALGATION")
if resp.FromApp == config.GetAppName() {
return
}
var propalgation tools.PropalgationMessage
err := json.Unmarshal(resp.Payload, &propalgation)
var dt *tools.DataType
@@ -18,20 +105,108 @@ func ListenNATS(n Node) {
dtt := tools.DataType(propalgation.DataType)
dt = &dtt
}
fmt.Println("PROPALGATION ACT", propalgation.Action, propalgation.Action == tools.PB_CREATE, err)
if err == nil {
switch propalgation.Action {
case tools.PB_CREATE:
case tools.PB_UPDATE:
case tools.PB_DELETE:
n.StreamService.ToPartnerPublishEvent(
case tools.PB_ADMIRALTY_CONFIG, tools.PB_MINIO_CONFIG:
var m configPayload
var proto protocol.ID = stream.ProtocolAdmiraltyConfigResource
if propalgation.Action == tools.PB_MINIO_CONFIG {
proto = stream.ProtocolMinioConfigResource
}
if err := json.Unmarshal(resp.Payload, &m); err == nil {
peers, _ := n.GetPeerRecord(context.Background(), m.PeerID)
for _, p := range peers {
n.StreamService.PublishCommon(&resp.Datatype, resp.User,
p.PeerID, proto, resp.Payload)
}
}
case tools.PB_CREATE, tools.PB_UPDATE, tools.PB_DELETE:
fmt.Println(propalgation.Action, dt, resp.User, propalgation.Payload)
fmt.Println(n.StreamService.ToPartnerPublishEvent(
context.Background(),
propalgation.Action,
dt, resp.User,
propalgation.Payload,
)
case tools.PB_SEARCH:
))
case tools.PB_CONSIDERS:
switch resp.Datatype {
case tools.BOOKING, tools.PURCHASE_RESOURCE, tools.WORKFLOW_EXECUTION:
var m executionConsidersPayload
if err := json.Unmarshal(resp.Payload, &m); err == nil {
for _, p := range m.PeerIDs {
peers, _ := n.GetPeerRecord(context.Background(), p)
for _, pp := range peers {
n.StreamService.PublishCommon(&resp.Datatype, resp.User,
pp.PeerID, stream.ProtocolConsidersResource, resp.Payload)
}
}
}
default:
// minio / admiralty config considers — route back to OriginID.
var m struct {
OriginID string `json:"origin_id"`
}
if err := json.Unmarshal(propalgation.Payload, &m); err == nil && m.OriginID != "" {
peers, _ := n.GetPeerRecord(context.Background(), m.OriginID)
for _, p := range peers {
n.StreamService.PublishCommon(nil, resp.User,
p.PeerID, stream.ProtocolConsidersResource, propalgation.Payload)
}
}
}
case tools.PB_PLANNER:
m := map[string]interface{}{}
json.Unmarshal(propalgation.Payload, &m)
if err := json.Unmarshal(resp.Payload, &m); err == nil {
b := []byte{}
if len(m) > 1 {
b = resp.Payload
}
if m["peer_id"] == nil { // send to every active stream
n.StreamService.Mu.Lock()
if n.StreamService.Streams[stream.ProtocolSendPlanner] != nil {
for pid := range n.StreamService.Streams[stream.ProtocolSendPlanner] { // send Planner can be long lived - it's a conn
n.StreamService.PublishCommon(nil, resp.User, pid.String(), stream.ProtocolSendPlanner, b)
}
}
} else {
n.StreamService.PublishCommon(nil, resp.User, fmt.Sprintf("%v", m["peer_id"]), stream.ProtocolSendPlanner, b)
}
n.StreamService.Mu.Unlock()
}
case tools.PB_CLOSE_PLANNER:
m := map[string]interface{}{}
if err := json.Unmarshal(resp.Payload, &m); err == nil {
n.StreamService.Mu.Lock()
if pid, err := pp.Decode(fmt.Sprintf("%v", m["peer_id"])); err == nil {
if n.StreamService.Streams[stream.ProtocolSendPlanner] != nil && n.StreamService.Streams[stream.ProtocolSendPlanner][pid] != nil {
n.StreamService.Streams[stream.ProtocolSendPlanner][pid].Stream.Close()
delete(n.StreamService.Streams[stream.ProtocolSendPlanner], pid)
}
}
n.StreamService.Mu.Unlock()
}
case tools.PB_SEARCH:
if propalgation.DataType == int(tools.PEER) {
m := map[string]interface{}{}
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
needle := fmt.Sprintf("%v", m["search"])
userKey := resp.User
go n.SearchPeerRecord(userKey, needle, func(hit common.SearchHit) {
if b, err := json.Marshal(hit); err == nil {
tools.NewNATSCaller().SetNATSPub(tools.SEARCH_EVENT, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(tools.PEER),
Method: int(tools.SEARCH_EVENT),
Payload: b,
})
}
})
}
} else {
m := map[string]interface{}{}
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
n.PubSubService.SearchPublishEvent(
context.Background(),
dt,
@@ -41,6 +216,8 @@ func ListenNATS(n Node) {
)
}
}
}
}
},
})
}
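// Illustrative sketch (the emitting app name and user key are assumptions): how a
// local service asks oc-discovery, via NATS, to run the distributed peer search
// branch handled above (PB_SEARCH with DataType PEER).
//
//	inner, _ := json.Marshal(map[string]string{"search": "gridlab"})
//	outer, _ := json.Marshal(tools.PropalgationMessage{
//		Action:   tools.PB_SEARCH,
//		DataType: int(tools.PEER),
//		Payload:  inner,
//	})
//	tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
//		FromApp: "oc-front", // assumed caller name
//		User:    "user-42",  // assumed user key
//		Method:  int(tools.PROPALGATION_EVENT),
//		Payload: outer,
//	})
//
// Each matching hit is then pushed back on tools.SEARCH_EVENT, as done in the handler above.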

View File

@@ -2,7 +2,6 @@ package node
import (
"context"
"crypto/sha256"
"encoding/json"
"errors"
"fmt"
@@ -11,17 +10,29 @@ import (
"oc-discovery/daemons/node/indexer"
"oc-discovery/daemons/node/pubsub"
"oc-discovery/daemons/node/stream"
"sync"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/google/uuid"
"github.com/libp2p/go-libp2p"
pubsubs "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/crypto"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
"github.com/libp2p/go-libp2p/p2p/security/noise"
)
// activeSearch tracks an in-flight distributed peer search for one user.
type activeSearch struct {
queryID string
cancel context.CancelFunc
}
type Node struct {
*common.LongLivedStreamRecordedService[interface{}] // change type of stream
PS *pubsubs.PubSub
@@ -30,6 +41,13 @@ type Node struct {
StreamService *stream.StreamService
PeerID pp.ID
isIndexer bool
peerRecord *indexer.PeerRecord
// activeSearches: one streaming search per user; new search cancels previous.
activeSearchesMu sync.Mutex
activeSearches map[string]*activeSearch
Mu sync.RWMutex
}
func InitNode(isNode bool, isIndexer bool) (*Node, error) {
@@ -38,7 +56,7 @@ func InitNode(isNode bool, isIndexer bool) (*Node, error) {
}
logger := oclib.GetLogger()
logger.Info().Msg("retrieving private key...")
priv, err := common.LoadKeyFromFilePrivate() // your node private key
priv, err := tools.LoadKeyFromFilePrivate() // your node private key
if err != nil {
return nil, err
}
@@ -48,13 +66,17 @@ func InitNode(isNode bool, isIndexer bool) (*Node, error) {
return nil, nil
}
logger.Info().Msg("open a host...")
gater := newOCConnectionGater(nil) // host set below after creation
h, err := libp2p.New(
libp2p.PrivateNetwork(psk),
libp2p.Identity(priv),
libp2p.Security(noise.ID, noise.New),
libp2p.ListenAddrStrings(
fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", conf.GetConfig().NodeEndpointPort),
),
libp2p.ConnectionGater(gater),
)
if h != nil {
gater.host = h // wire host back into gater now that it exists
logger.Info().Msg("Host open on " + h.ID().String())
}
if err != nil {
return nil, errors.New("no host no node")
@@ -62,8 +84,16 @@ func InitNode(isNode bool, isIndexer bool) (*Node, error) {
node := &Node{
PeerID: h.ID(),
isIndexer: isIndexer,
LongLivedStreamRecordedService: common.NewStreamRecordedService[interface{}](h, 1000, false),
LongLivedStreamRecordedService: common.NewStreamRecordedService[interface{}](h, 1000),
activeSearches: map[string]*activeSearch{},
}
// Register the bandwidth probe handler so any peer measuring this node's
// throughput can open a dedicated probe stream and read the echo.
h.SetStreamHandler(common.ProtocolBandwidthProbe, common.HandleBandwidthProbe)
// Register the witness query handler so peers can ask this node's view of indexers.
h.SetStreamHandler(common.ProtocolWitnessQuery, func(s network.Stream) {
common.HandleWitnessQuery(h, s)
})
var ps *pubsubs.PubSub
if isNode {
logger.Info().Msg("generate opencloud node...")
@@ -72,13 +102,34 @@ func InitNode(isNode bool, isIndexer bool) (*Node, error) {
panic(err) // can't run your node without a propagation pubsub for node state
}
node.PS = ps
// buildRecord returns a fresh signed PeerRecord as JSON, embedded in each
// heartbeat so the receiving indexer can republish it to the DHT directly.
// peerRecord is nil until claimInfo runs, so the first ~20s heartbeats carry
// no record — that's fine, claimInfo publishes once synchronously at startup.
buildRecord := func() json.RawMessage {
if node.peerRecord == nil {
return nil
}
priv, err := tools.LoadKeyFromFilePrivate()
if err != nil {
return nil
}
fresh := *node.peerRecord
fresh.PeerRecordPayload.ExpiryDate = time.Now().UTC().Add(2 * time.Minute)
payload, _ := json.Marshal(fresh.PeerRecordPayload)
fresh.Signature, err = priv.Sign(payload)
if err != nil {
return nil
}
b, _ := json.Marshal(fresh)
return json.RawMessage(b)
}
logger.Info().Msg("connect to indexers...")
common.ConnectToIndexers(node.Host, 0, 5, node.PeerID) // TODO : make var to change how many indexers are allowed.
common.ConnectToIndexers(node.Host, conf.GetConfig().MinIndexer, conf.GetConfig().MaxIndexer, buildRecord)
logger.Info().Msg("claims my node...")
if _, err := node.claimInfo(conf.GetConfig().Name, conf.GetConfig().Hostname); err != nil {
panic(err)
}
logger.Info().Msg("subscribe to decentralized search flow...")
logger.Info().Msg("run garbage collector...")
node.StartGC(30 * time.Second)
@@ -90,19 +141,25 @@ func InitNode(isNode bool, isIndexer bool) (*Node, error) {
panic(err)
}
f := func(ctx context.Context, evt common.Event, topic string) {
if p, err := node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 {
node.StreamService.SendResponse(p[0], &evt)
m := map[string]interface{}{}
err := json.Unmarshal(evt.Payload, &m)
if err != nil || evt.From == node.PeerID.String() {
return
}
if p, err := node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 && m["search"] != nil {
node.StreamService.SendResponse(p[0], &evt, fmt.Sprintf("%v", m["search"]))
}
}
logger.Info().Msg("subscribe to decentralized search flow...")
node.SubscribeToSearch(node.PS, &f)
logger.Info().Msg("connect to NATS")
go ListenNATS(node)
logger.Info().Msg("Node is actually running.")
}
if isIndexer {
logger.Info().Msg("generate opencloud indexer...")
node.IndexerService = indexer.NewIndexerService(node.Host, ps, 5)
node.IndexerService = indexer.NewIndexerService(node.Host, ps, 500)
}
logger.Info().Msg("connect to NATS")
ListenNATS(*node)
logger.Info().Msg("Node is actually running.")
return node, nil
}
@@ -118,30 +175,27 @@ func (d *Node) Close() {
func (d *Node) publishPeerRecord(
rec *indexer.PeerRecord,
) error {
priv, err := common.LoadKeyFromFilePrivate() // your node private key
priv, err := tools.LoadKeyFromFilePrivate() // your node private key
if err != nil {
return err
}
if common.StreamIndexers[common.ProtocolPublish] == nil {
return errors.New("no protocol Publish is set up on the node")
for _, ad := range common.Indexers.GetAddrs() {
var err error
if common.Indexers.Streams, err = common.TempStream(d.Host, *ad.Info, common.ProtocolPublish, "", common.Indexers.Streams, map[protocol.ID]*common.ProtocolInfo{},
&common.Indexers.MuStream); err != nil {
continue
}
for _, ad := range common.StaticIndexers {
if common.StreamIndexers[common.ProtocolPublish][ad.ID] == nil {
return errors.New("no protocol Publish for peer " + ad.ID.String() + " is set up on the node")
}
stream := common.StreamIndexers[common.ProtocolPublish][ad.ID]
base := indexer.PeerRecord{
stream := common.Indexers.Streams.GetPerID(common.ProtocolPublish, ad.Info.ID)
base := indexer.PeerRecordPayload{
Name: rec.Name,
DID: rec.DID,
PubKey: rec.PubKey,
ExpiryDate: time.Now().UTC().Add(2 * time.Minute),
}
payload, _ := json.Marshal(base)
hash := sha256.Sum256(payload)
rec.ExpiryDate = base.ExpiryDate
rec.Signature, err = priv.Sign(hash[:])
rec.TTL = 2
rec.PeerRecordPayload = base
rec.Signature, err = priv.Sign(payload)
if err := json.NewEncoder(stream.Stream).Encode(&rec); err != nil { // then publish on stream
return err
}
@@ -149,45 +203,115 @@ func (d *Node) publishPeerRecord(
return nil
}
// SearchPeerRecord starts a distributed peer search via ProtocolSearchPeer.
// userKey identifies the requesting user — a new call cancels any previous
// search for the same user. Results are pushed to onResult as they arrive.
// The function returns when the search stream closes (idle timeout or indexer unreachable).
func (d *Node) SearchPeerRecord(userKey, needle string, onResult func(common.SearchHit)) {
logger := oclib.GetLogger()
ctx, cancel := context.WithCancel(context.Background())
d.activeSearchesMu.Lock()
if prev, ok := d.activeSearches[userKey]; ok {
prev.cancel()
}
queryID := uuid.New().String()
d.activeSearches[userKey] = &activeSearch{queryID: queryID, cancel: cancel}
d.activeSearchesMu.Unlock()
defer func() {
cancel()
d.activeSearchesMu.Lock()
if cur, ok := d.activeSearches[userKey]; ok && cur.queryID == queryID {
delete(d.activeSearches, userKey)
}
d.activeSearchesMu.Unlock()
}()
req := common.SearchPeerRequest{QueryID: queryID}
if pid, err := pp.Decode(needle); err == nil {
req.PeerID = pid.String()
} else if _, err := uuid.Parse(needle); err == nil {
req.DID = needle
} else {
req.Name = needle
}
// Try indexers in pool order until one accepts the stream.
for _, ad := range common.Indexers.GetAddrs() {
if ad.Info == nil {
continue
}
dialCtx, dialCancel := context.WithTimeout(ctx, 5*time.Second)
s, err := d.Host.NewStream(dialCtx, ad.Info.ID, common.ProtocolSearchPeer)
dialCancel()
if err != nil {
continue
}
if err := json.NewEncoder(s).Encode(req); err != nil {
s.Reset()
continue
}
dec := json.NewDecoder(s)
for {
var result common.SearchPeerResult
if err := dec.Decode(&result); err != nil {
break
}
if result.QueryID != queryID {
continue // stale response from a previous query
}
for _, hit := range result.Records {
onResult(hit)
}
}
s.Reset()
return
}
logger.Warn().Str("user", userKey).Msg("[search] no reachable indexer for peer search")
}
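// Illustrative usage sketch (user key and needle are assumptions): hits are pushed
// to the callback as indexers answer, and the call returns once the indexer's idle
// timeout closes the stream or a newer search for the same user cancels this one.
//
//	n.SearchPeerRecord("user-42", "gridlab", func(hit common.SearchHit) {
//		fmt.Println("hit:", hit.Name, hit.PeerID, hit.DID)
//	})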
func (d *Node) GetPeerRecord(
ctx context.Context,
key string,
pidOrdid string,
) ([]*peer.Peer, error) {
var err error
var info map[string]indexer.PeerRecord
if common.StreamIndexers[common.ProtocolPublish] == nil {
return nil, errors.New("no protocol Publish is set up on the node")
// Build the GetValue request: if pidOrdid is neither a UUID DID nor a libp2p
// PeerID, treat it as a human-readable name and let the indexer resolve it.
// GetPeerRecord resolves by PeerID or DID only.
// Name-based search goes through SearchPeerRecord (ProtocolSearchPeer).
getReq := indexer.GetValue{Key: pidOrdid}
if pidR, pidErr := pp.Decode(pidOrdid); pidErr == nil {
getReq.PeerID = pidR.String()
getReq.Key = ""
}
for _, ad := range common.StaticIndexers {
if common.StreamIndexers[common.ProtocolPublish][ad.ID] == nil {
return nil, errors.New("no protocol Publish for peer " + ad.ID.String() + " is set up on the node")
for _, ad := range common.Indexers.GetAddrs() {
if common.Indexers.Streams, err = common.TempStream(d.Host, *ad.Info, common.ProtocolGet, "",
common.Indexers.Streams, map[protocol.ID]*common.ProtocolInfo{}, &common.Indexers.MuStream); err != nil {
continue
}
stream := common.StreamIndexers[common.ProtocolPublish][ad.ID]
if err := json.NewEncoder(stream.Stream).Encode(indexer.GetValue{Key: key}); err != nil {
return nil, err
stream := common.Indexers.Streams.GetPerID(common.ProtocolGet, ad.Info.ID)
if err := json.NewEncoder(stream.Stream).Encode(getReq); err != nil {
continue
}
for {
var resp indexer.GetResponse
if err := json.NewDecoder(stream.Stream).Decode(&resp); err != nil {
return nil, err
break // stop reading from this indexer and try the next one
}
if resp.Found {
info = resp.Records
}
break
}
}
}
var ps []*peer.Peer
for _, pr := range info {
if pk, err := pr.Verify(); err != nil {
return nil, err
} else if ok, p, err := pr.ExtractPeer(d.PeerID.String(), key, pk); err != nil {
} else if _, p, err := pr.ExtractPeer(d.PeerID.String(), pr.PeerID, pk); err != nil {
return nil, err
} else {
if ok {
d.publishPeerRecord(&pr)
}
ps = append(ps, p)
}
}
@@ -202,12 +326,21 @@ func (d *Node) claimInfo(
if endPoint == "" {
return nil, errors.New("no endpoint found for peer")
}
peerID := uuid.New().String()
priv, err := common.LoadKeyFromFilePrivate()
did := uuid.New().String()
peers := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // look up an existing peer entry for this host's PeerID
"peer_id": {{Operator: dbs.EQUAL.String(), Value: d.Host.ID().String()}},
},
}, "", false)
if len(peers.Data) > 0 {
did = peers.Data[0].GetID() // if already existing set up did as made
}
priv, err := tools.LoadKeyFromFilePrivate()
if err != nil {
return nil, err
}
pub, err := common.LoadKeyFromFilePublic()
pub, err := tools.LoadKeyFromFilePublic()
if err != nil {
return nil, err
}
@@ -219,37 +352,63 @@ func (d *Node) claimInfo(
now := time.Now().UTC()
expiry := now.Add(150 * time.Second)
rec := &indexer.PeerRecord{
pRec := indexer.PeerRecordPayload{
Name: name,
DID: peerID, // REAL PEER ID
DID: did, // REAL PEER ID
PubKey: pubBytes,
ExpiryDate: expiry,
}
rec.PeerID = d.Host.ID().String()
d.PeerID = d.Host.ID()
payload, _ := json.Marshal(pRec)
payload, _ := json.Marshal(rec)
hash := sha256.Sum256(payload)
rec.Signature, err = priv.Sign(hash[:])
rec := &indexer.PeerRecord{
PeerRecordPayload: pRec,
}
rec.Signature, err = priv.Sign(payload)
if err != nil {
return nil, err
}
rec.PeerID = d.Host.ID().String()
rec.APIUrl = endPoint
rec.StreamAddress = "/ip4/" + conf.GetConfig().Hostname + " /tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + " /p2p/" + rec.PeerID
rec.StreamAddress = "/ip4/" + conf.GetConfig().Hostname + "/tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + "/p2p/" + rec.PeerID
rec.NATSAddress = oclib.GetConfig().NATSUrl
rec.WalletAddress = "my-wallet"
rec.ExpiryDate = expiry
if err := d.publishPeerRecord(rec); err != nil {
return nil, err
}
/*if pk, err := rec.Verify(); err != nil {
fmt.Println("Verify")
d.peerRecord = rec
if _, err := rec.Verify(); err != nil {
return nil, err
} else {*/
_, p, err := rec.ExtractPeer(peerID, peerID, pub)
} else {
_, p, err := rec.ExtractPeer(did, did, pub)
b, err := json.Marshal(p)
if err != nil {
return p, err
//}
}
go tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.PEER,
Method: int(tools.CREATE_RESOURCE),
SearchAttr: "peer_id",
Payload: b,
})
return p, err
}
}
/*
TODO:
- Booking is a new decentralized flow:
we check, wait for a response, validate; it goes through discovery and we relay it.
- The shared workspace is a decentralization concern:
we communicate the movements to the shared peers.
- A share replaces the notion of partnership at the partnering scale
-> when a workspace is shared, the peer becomes a temporary partner,
whether it originally was one or not.
-> it then gets the same privileges.
- Admiralty orchestrations work the same way.
An event then triggers the creation of a service key.
We must be able to CRUD a DBObject with signature verification.
*/

View File

@@ -1,41 +0,0 @@
package pubsub
import (
"context"
"oc-discovery/daemons/node/common"
"cloud.o-forge.io/core/oc-lib/tools"
)
func (ps *PubSubService) handleEvent(ctx context.Context, topicName string, evt *common.Event) error {
action := ps.getTopicName(topicName)
if err := ps.handleEventSearch(ctx, evt, action); err != nil {
return err
}
return nil
}
func (ps *PubSubService) handleEventSearch( // only : on partner followings. 3 canals for every partner.
ctx context.Context,
evt *common.Event,
action tools.PubSubAction,
) error {
if !(action == tools.PB_SEARCH_RESPONSE || action == tools.PB_SEARCH) {
return nil
}
if p, err := ps.Node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 { // peerFrom is Unique
if err := evt.Verify(p[0]); err != nil {
return err
}
switch action {
case tools.PB_SEARCH: // when someone ask for search.
if err := ps.StreamService.SendResponse(p[0], evt); err != nil {
return err
}
default:
return nil
}
}
return nil
}

View File

@@ -5,63 +5,54 @@ import (
"encoding/json"
"errors"
"oc-discovery/daemons/node/common"
"oc-discovery/daemons/node/stream"
"oc-discovery/models"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
)
func (ps *PubSubService) SearchPublishEvent(
ctx context.Context, dt *tools.DataType, typ string, user string, search string) error {
b, err := json.Marshal(map[string]string{"search": search})
if err != nil {
return err
}
switch typ {
case "known": // define Search Strategy
return ps.StreamService.SearchKnownPublishEvent(dt, user, search) //if partners focus only them*/
return ps.StreamService.PublishesCommon(dt, user, nil, b, stream.ProtocolSearchResource) // "known": publish the search to every known peer
case "partner": // define Search Strategy
return ps.StreamService.SearchPartnersPublishEvent(dt, user, search) //if partners focus only them*/
return ps.StreamService.PublishesCommon(dt, user, &dbs.Filters{ // restrict to peers whose relation is PARTNER
And: map[string][]dbs.Filter{
"relation": {{Operator: dbs.EQUAL.String(), Value: peer.PARTNER}},
},
}, b, stream.ProtocolSearchResource)
case "all": // Gossip PubSub
b, err := json.Marshal(map[string]string{"search": search})
if err != nil {
return err
}
return ps.searchPublishEvent(ctx, dt, user, b)
return ps.publishEvent(ctx, dt, tools.PB_SEARCH, common.TopicPubSubSearch, user, b)
default:
return errors.New("no type of research found")
}
}
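// Illustrative sketch (context, user and search values are assumptions): the three
// strategies map to every known peer ("known"), PARTNER peers only ("partner"),
// and a gossip broadcast on TopicPubSubSearch ("all").
//
//	dt := tools.STORAGE_RESOURCE
//	_ = ps.SearchPublishEvent(context.Background(), &dt, "partner", "user-42", "s3")
//	_ = ps.SearchPublishEvent(context.Background(), &dt, "all", "user-42", "s3")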
func (ps *PubSubService) searchPublishEvent(
ctx context.Context, dt *tools.DataType, user string, payload []byte) error {
id, err := oclib.GenerateNodeID()
if err != nil {
return err
}
if err := ps.subscribeEvents(ctx, dt, tools.PB_SEARCH_RESPONSE, id, 60); err != nil { // TODO Catpure Event !
return err
}
return ps.publishEvent(ctx, dt, tools.PB_SEARCH, user, "", payload, false)
}
func (ps *PubSubService) publishEvent(
ctx context.Context, dt *tools.DataType, action tools.PubSubAction, user string,
peerID string, payload []byte, chanNamedByDt bool,
ctx context.Context, dt *tools.DataType, action tools.PubSubAction, topicName string, user string, payload []byte,
) error {
name := action.String() + "#" + peerID
if chanNamedByDt && dt != nil { // if a datatype is precised then : app.action.datatype#peerID
name = action.String() + "." + (*dt).String() + "#" + peerID
}
from, err := oclib.GenerateNodeID()
priv, err := tools.LoadKeyFromFilePrivate()
if err != nil {
return err
}
priv, err := common.LoadKeyFromFilePrivate()
msg, _ := json.Marshal(models.NewEvent(action.String(), ps.Host.ID().String(), dt, user, payload, priv))
topic := ps.Node.GetPubSub(topicName)
if topic == nil {
topic, err = ps.PS.Join(topicName)
if err != nil {
return err
}
msg, _ := json.Marshal(models.NewEvent(name, from, dt, user, payload, priv))
topic, err := ps.PS.Join(name)
if err != nil {
return err
}
return topic.Publish(ctx, msg)
}

View File

@@ -4,17 +4,13 @@ import (
"context"
"oc-discovery/daemons/node/common"
"oc-discovery/daemons/node/stream"
"strings"
"sync"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/tools"
pubsub "github.com/libp2p/go-libp2p-pubsub"
"github.com/libp2p/go-libp2p/core/host"
)
type PubSubService struct {
*common.LongLivedPubSubService
Node common.DiscoveryPeer
Host host.Host
PS *pubsub.PubSub
@@ -24,24 +20,12 @@ type PubSubService struct {
}
func InitPubSub(ctx context.Context, h host.Host, ps *pubsub.PubSub, node common.DiscoveryPeer, streamService *stream.StreamService) (*PubSubService, error) {
service := &PubSubService{
LongLivedPubSubService: common.NewLongLivedPubSubService(h),
return &PubSubService{
Host: h,
Node: node,
StreamService: streamService,
PS: ps,
}
logger := oclib.GetLogger()
logger.Info().Msg("subscribe to events...")
service.initSubscribeEvents(ctx)
return service, nil
}
func (ps *PubSubService) getTopicName(topicName string) tools.PubSubAction {
ns := strings.Split(topicName, ".")
if len(ns) > 0 {
return tools.GetActionString(ns[0])
}
return tools.NONE
}, nil
}
func (ix *PubSubService) Close() {

View File

@@ -1,45 +0,0 @@
package pubsub
import (
"context"
"oc-discovery/daemons/node/common"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
)
func (ps *PubSubService) initSubscribeEvents(ctx context.Context) error {
if err := ps.subscribeEvents(ctx, nil, tools.PB_SEARCH, "", -1); err != nil {
return err
}
return nil
}
// generic function to subscribe to DHT flow of event
func (ps *PubSubService) subscribeEvents(
ctx context.Context, dt *tools.DataType, action tools.PubSubAction, peerID string, timeout int,
) error {
logger := oclib.GetLogger()
// define a name app.action#peerID
name := action.String() + "#" + peerID
if dt != nil { // if a datatype is precised then : app.action.datatype#peerID
name = action.String() + "." + (*dt).String() + "#" + peerID
}
f := func(ctx context.Context, evt common.Event, topicName string) {
if p, err := ps.Node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 {
if err := ps.processEvent(ctx, p[0], &evt, topicName); err != nil {
logger.Err(err)
}
}
}
return common.SubscribeEvents(ps.LongLivedPubSubService, ctx, name, -1, f)
}
func (ps *PubSubService) processEvent(
ctx context.Context, p *peer.Peer, event *common.Event, topicName string) error {
if err := event.Verify(p); err != nil {
return err
}
return ps.handleEvent(ctx, topicName, event)
}

View File

@@ -2,35 +2,114 @@ package stream
import (
"context"
"crypto/subtle"
"encoding/json"
"errors"
"fmt"
"oc-discovery/daemons/node/common"
"strings"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/booking/planner"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/models/resources"
"cloud.o-forge.io/core/oc-lib/tools"
)
func (ps *StreamService) getTopicName(topicName string) tools.PubSubAction {
ns := strings.Split(topicName, ".")
if len(ns) > 0 {
return tools.GetActionString(ns[0])
}
return tools.NONE
type Verify struct {
IsVerified bool `json:"is_verified"`
}
func (ps *StreamService) handleEvent(topicName string, evt *common.Event) error {
action := ps.getTopicName(topicName)
if err := ps.handleEventFromPartner(evt, action); err != nil {
func (ps *StreamService) handleEvent(protocol string, evt *common.Event) error {
fmt.Println("handleEvent")
ps.handleEventFromPartner(evt, protocol)
/*if protocol == ProtocolVerifyResource {
if evt.DataType == -1 {
tools.NewNATSCaller().SetNATSPub(tools.VERIFY_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Method: int(tools.VERIFY_RESOURCE),
Payload: evt.Payload,
})
} else if err := ps.verifyResponse(evt); err != nil {
return err
}
if action == tools.PB_SEARCH_RESPONSE {
}*/
if protocol == ProtocolSendPlanner {
return ps.sendPlanner(evt)
}
if protocol == ProtocolSearchResource && evt.DataType > -1 {
return ps.retrieveResponse(evt)
}
if protocol == ProtocolConsidersResource {
return ps.pass(evt, tools.PB_CONSIDERS)
}
if protocol == ProtocolAdmiraltyConfigResource {
return ps.pass(evt, tools.PB_ADMIRALTY_CONFIG)
}
if protocol == ProtocolMinioConfigResource {
return ps.pass(evt, tools.PB_MINIO_CONFIG)
}
if protocol == ProtocolSearchResource || protocol == ProtocolCreateResource ||
protocol == ProtocolUpdateResource || protocol == ProtocolDeleteResource {
return nil // already routed to NATS by handleEventFromPartner above
}
return errors.New("no action authorized available : " + protocol)
}
func (abs *StreamService) verifyResponse(event *common.Event) error { //
res, err := resources.ToResource(int(event.DataType), event.Payload)
if err != nil || res == nil {
return nil
}
verify := Verify{
IsVerified: false,
}
access := oclib.NewRequestAdmin(oclib.LibDataEnum(event.DataType), nil)
data := access.LoadOne(res.GetID())
if data.Err == "" && data.Data != nil {
if b, err := json.Marshal(data.Data); err == nil {
if res2, err := resources.ToResource(int(event.DataType), b); err == nil {
verify.IsVerified = subtle.ConstantTimeCompare(res.GetSignature(), res2.GetSignature()) == 1
}
}
}
if b, err := json.Marshal(verify); err == nil {
abs.PublishCommon(nil, "", event.From, ProtocolVerifyResource, b)
}
return nil
}
func (abs *StreamService) sendPlanner(event *common.Event) error { //
if len(event.Payload) == 0 {
if plan, err := planner.GenerateShallow(&tools.APIRequest{Admin: true}); err == nil {
if b, err := json.Marshal(plan); err == nil {
abs.PublishCommon(nil, event.User, event.From, ProtocolSendPlanner, b)
} else {
return err
}
} else {
return err
}
} else { // non-empty payload: an execution request from the peer, forward it to the local planner
m := map[string]interface{}{}
if err := json.Unmarshal(event.Payload, &m); err == nil {
m["peer_id"] = event.From
if pl, err := json.Marshal(m); err == nil {
go tools.NewNATSCaller().SetNATSPub(tools.PLANNER_EXECUTION, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(oclib.BOOKING),
Method: int(tools.PLANNER_EXECUTION),
Payload: pl,
})
}
}
}
return nil
}
@@ -40,81 +119,107 @@ func (abs *StreamService) retrieveResponse(event *common.Event) error { //
return nil
}
b, err := json.Marshal(res.Serialize(res))
go tools.NewNATSCaller().SetNATSPub(tools.CATALOG_SEARCH_EVENT, tools.NATSResponse{
go tools.NewNATSCaller().SetNATSPub(tools.SEARCH_EVENT, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(event.DataType),
Method: int(tools.CATALOG_SEARCH_EVENT),
Method: int(tools.SEARCH_EVENT),
Payload: b,
})
return nil
}
func (ps *StreamService) handleEventFromPartner(evt *common.Event, action tools.PubSubAction) error {
if !(action == tools.PB_CREATE || action == tools.PB_UPDATE || action == tools.PB_DELETE) {
func (abs *StreamService) pass(event *common.Event, action tools.PubSubAction) error { //
if b, err := json.Marshal(&tools.PropalgationMessage{
Action: action,
DataType: int(event.DataType),
Payload: event.Payload,
}); err == nil {
go tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(event.DataType),
Method: int(tools.PROPALGATION_EVENT),
Payload: b,
})
}
return nil
}
resource, err := resources.ToResource(int(evt.DataType), evt.Payload)
}
func (ps *StreamService) handleEventFromPartner(evt *common.Event, protocol string) error {
switch protocol {
case ProtocolSearchResource:
m := map[string]interface{}{}
err := json.Unmarshal(evt.Payload, &m)
if err != nil {
return err
}
b, err := json.Marshal(resource)
if err != nil {
return err
}
switch action {
case tools.PB_SEARCH:
if search, ok := m["search"]; ok {
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
peers := access.Search(nil, evt.From, false)
peers := access.Search(&dbs.Filters{
And: map[string][]dbs.Filter{
"peer_id": {{Operator: dbs.EQUAL.String(), Value: evt.From}},
},
}, evt.From, false)
if len(peers.Data) > 0 {
p := peers.Data[0].(*peer.Peer)
// TODO: handle the case where the peer is missing on our side
ps.SendResponse(p, evt)
fmt.Println(evt.From, p.GetID(), peers.Data)
ps.SendResponse(p, evt, fmt.Sprintf("%v", search))
} else if p, err := ps.Node.GetPeerRecord(context.Background(), evt.From); err == nil && len(p) > 0 { // peer from is peerID
ps.SendResponse(p[0], evt)
ps.SendResponse(p[0], evt, fmt.Sprintf("%v", search))
}
case tools.PB_CREATE:
case tools.PB_UPDATE:
} else {
fmt.Println("SEND SEARCH_EVENT SetNATSPub", m)
go tools.NewNATSCaller().SetNATSPub(tools.SEARCH_EVENT, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(evt.DataType),
Method: int(tools.SEARCH_EVENT),
Payload: evt.Payload,
})
}
case ProtocolCreateResource, ProtocolUpdateResource:
fmt.Println("RECEIVED Protocol.Update")
go tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(evt.DataType),
Method: int(tools.CREATE_RESOURCE),
Payload: b,
Payload: evt.Payload,
})
case tools.PB_DELETE:
case ProtocolDeleteResource:
go tools.NewNATSCaller().SetNATSPub(tools.REMOVE_RESOURCE, tools.NATSResponse{
FromApp: "oc-discovery",
Datatype: tools.DataType(evt.DataType),
Method: int(tools.REMOVE_RESOURCE),
Payload: b,
Payload: evt.Payload,
})
default:
return errors.New("no action authorized available : " + action.String())
return errors.New("no action authorized available : " + protocol)
}
return nil
}
func (abs *StreamService) SendResponse(p *peer.Peer, event *common.Event) error {
dts := []oclib.LibDataEnum{oclib.LibDataEnum(event.DataType)}
func (abs *StreamService) SendResponse(p *peer.Peer, event *common.Event, search string) error {
dts := []tools.DataType{tools.DataType(event.DataType)}
if event.DataType == -1 { // expect all resources
dts = []oclib.LibDataEnum{oclib.LibDataEnum(oclib.COMPUTE_RESOURCE), oclib.LibDataEnum(oclib.STORAGE_RESOURCE),
oclib.LibDataEnum(oclib.PROCESSING_RESOURCE), oclib.LibDataEnum(oclib.DATA_RESOURCE), oclib.LibDataEnum(oclib.WORKFLOW_RESOURCE)}
dts = []tools.DataType{
tools.COMPUTE_RESOURCE,
tools.STORAGE_RESOURCE,
tools.PROCESSING_RESOURCE,
tools.DATA_RESOURCE,
tools.WORKFLOW_RESOURCE,
}
var m map[string]string
err := json.Unmarshal(event.Payload, &m)
if err != nil {
}
if self, err := oclib.GetMySelf(); err != nil {
return err
}
} else {
for _, dt := range dts {
access := oclib.NewRequestAdmin(oclib.LibDataEnum(event.DataType), nil)
access := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil)
peerID := p.GetID()
searched := access.Search(abs.FilterPeer(peerID, m["search"]), "", false)
searched := access.Search(abs.FilterPeer(self.GetID(), search), "", false)
fmt.Println("SEND SEARCH_EVENT", self.GetID(), dt, len(searched.Data), peerID)
for _, ss := range searched.Data {
if j, err := json.Marshal(ss); err == nil {
if event.DataType != -1 {
ndt := tools.DataType(dt.EnumIndex())
abs.PublishResources(&ndt, event.User, peerID, j)
} else {
abs.PublishResources(nil, event.User, peerID, j)
_, err := abs.PublishCommon(&dt, event.User, p.PeerID, ProtocolSearchResource, j)
fmt.Println("Publish ERR", err)
}
}
}

View File

@@ -6,78 +6,61 @@ import (
"errors"
"fmt"
"oc-discovery/daemons/node/common"
"time"
oclib "cloud.o-forge.io/core/oc-lib"
"cloud.o-forge.io/core/oc-lib/dbs"
"cloud.o-forge.io/core/oc-lib/models/peer"
"cloud.o-forge.io/core/oc-lib/tools"
"github.com/libp2p/go-libp2p/core/network"
pp "github.com/libp2p/go-libp2p/core/peer"
"github.com/libp2p/go-libp2p/core/protocol"
)
func (ps *StreamService) PublishResources(dt *tools.DataType, user string, toPeerID string, resource []byte) error {
func (ps *StreamService) PublishesCommon(dt *tools.DataType, user string, filter *dbs.Filters, resource []byte, protos ...protocol.ID) error {
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
p := access.LoadOne(toPeerID)
if p.Err != "" {
return errors.New(p.Err)
var p oclib.LibDataShallow
if filter == nil {
p = access.LoadAll(false)
} else {
ad, err := pp.AddrInfoFromString(p.Data.(*peer.Peer).StreamAddress)
if err != nil {
p = access.Search(filter, "", false)
}
for _, pes := range p.Data {
for _, proto := range protos {
if _, err := ps.PublishCommon(dt, user, pes.(*peer.Peer).PeerID, proto, resource); err != nil {
return err
}
ps.write(tools.PB_SEARCH, toPeerID, ad, dt, user, resource, ProtocolSearchResource, p.Data.(*peer.Peer).Relation == peer.PARTNER)
}
return nil
}
func (ps *StreamService) SearchKnownPublishEvent(dt *tools.DataType, user string, search string) error {
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
peers := access.Search(&dbs.Filters{ // filter by like name, short_description, description, owner, url if no filters are provided
And: map[string][]dbs.Filter{
"": {{Operator: dbs.NOT.String(), Value: dbs.Filters{ // filter by like name, short_description, description, owner, url if no filters are provided
And: map[string][]dbs.Filter{
"relation": {{Operator: dbs.EQUAL.String(), Value: peer.BLACKLIST}},
},
}}},
},
}, search, false)
if peers.Err != "" {
return errors.New(peers.Err)
} else {
b, err := json.Marshal(map[string]string{"search": search})
if err != nil {
return err
}
for _, p := range peers.Data {
ad, err := pp.AddrInfoFromString(p.(*peer.Peer).StreamAddress)
if err != nil {
continue
}
ps.write(tools.PB_SEARCH, p.GetID(), ad, dt, user, b, ProtocolSearchResource, p.(*peer.Peer).Relation == peer.PARTNER)
}
}
return nil
}
func (ps *StreamService) SearchPartnersPublishEvent(dt *tools.DataType, user string, search string) error {
if peers, err := ps.searchPeer(fmt.Sprintf("%v", peer.PARTNER.EnumIndex())); err != nil {
return err
} else {
b, err := json.Marshal(map[string]string{"search": search})
func (ps *StreamService) PublishCommon(dt *tools.DataType, user string, toPeerID string, proto protocol.ID, resource []byte) (*common.Stream, error) {
fmt.Println("PublishCommon")
if toPeerID == ps.Key.String() {
fmt.Println("Can't send to ourself !")
return nil, errors.New("Can't send to ourself !")
}
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
p := access.Search(&dbs.Filters{
And: map[string][]dbs.Filter{ // search by name if no filters are provided
"peer_id": {{Operator: dbs.EQUAL.String(), Value: toPeerID}},
},
}, toPeerID, false)
var pe *peer.Peer
if len(p.Data) > 0 && p.Data[0].(*peer.Peer).Relation != peer.BLACKLIST {
pe = p.Data[0].(*peer.Peer)
} else if pps, err := ps.Node.GetPeerRecord(context.Background(), toPeerID); err == nil && len(pps) > 0 {
pe = pps[0]
}
if pe != nil {
ad, err := pp.AddrInfoFromString(pe.StreamAddress)
if err != nil {
return err
return nil, err
}
for _, p := range peers {
ad, err := pp.AddrInfoFromString(p.StreamAddress)
if err != nil {
continue
fmt.Println("WRITE")
return ps.write(toPeerID, ad, dt, user, resource, proto)
}
ps.write(tools.PB_SEARCH, p.GetID(), ad, dt, user, b, ProtocolSearchResource, true)
}
}
return nil
return nil, errors.New("peer unvalid " + toPeerID)
}
func (ps *StreamService) ToPartnerPublishEvent(
@@ -87,102 +70,82 @@ func (ps *StreamService) ToPartnerPublishEvent(
if err := json.Unmarshal(payload, &p); err != nil {
return err
}
ad, err := pp.AddrInfoFromString(p.StreamAddress)
if err != nil {
if _, err := pp.Decode(p.PeerID); err != nil {
return err
}
ps.mu.Lock()
defer ps.mu.Unlock()
if p.Relation == peer.PARTNER {
if ps.Streams[ProtocolHeartbeatPartner] == nil {
ps.Streams[ProtocolHeartbeatPartner] = map[pp.ID]*common.Stream{}
}
ps.ConnectToPartner(ad.ID, ad)
} else if ps.Streams[ProtocolHeartbeatPartner] != nil && ps.Streams[ProtocolHeartbeatPartner][ad.ID] != nil {
for _, pids := range ps.Streams {
if pids[ad.ID] != nil {
delete(pids, ad.ID)
if pe, err := oclib.GetMySelf(); err != nil {
return err
} else if pe.GetID() == p.GetID() {
return fmt.Errorf("can't send to ourself")
} else {
pe.Relation = p.Relation
pe.Verify = false
if b2, err := json.Marshal(pe); err == nil {
if _, err := ps.PublishCommon(dt, user, p.PeerID, ProtocolUpdateResource, b2); err != nil {
return err
}
}
}
return nil
}
if peers, err := ps.searchPeer(fmt.Sprintf("%v", peer.PARTNER.EnumIndex())); err != nil {
return err
} else {
for _, p := range peers {
for _, protocol := range protocols {
ad, err := pp.AddrInfoFromString(p.StreamAddress)
if err != nil {
continue
}
ps.write(action, p.GetID(), ad, dt, user, payload, protocol, true)
}
ks := []protocol.ID{}
for k := range protocolsPartners {
ks = append(ks, k)
}
var proto protocol.ID
proto = ProtocolCreateResource
switch action {
case tools.PB_DELETE:
proto = ProtocolDeleteResource
case tools.PB_UPDATE:
proto = ProtocolUpdateResource
}
ps.PublishesCommon(dt, user, &dbs.Filters{ // filter by like name, short_description, description, owner, url if no filters are provided
And: map[string][]dbs.Filter{
"relation": {{Operator: dbs.EQUAL.String(), Value: peer.PARTNER}},
},
}, payload, proto)
return nil
}
func (s *StreamService) write(
action tools.PubSubAction,
did string,
peerID *pp.AddrInfo,
dt *tools.DataType,
user string,
payload []byte,
proto protocol.ID,
isAPartner bool) error {
proto protocol.ID) (*common.Stream, error) {
logger := oclib.GetLogger()
name := action.String() + "#" + peerID.ID.String()
if dt != nil {
name = action.String() + "." + (*dt).String() + "#" + peerID.ID.String()
var err error
pts := map[protocol.ID]*common.ProtocolInfo{}
for k, v := range protocols {
pts[k] = v
}
s.mu.Lock()
defer s.mu.Unlock()
if s.Streams[proto] == nil {
s.Streams[proto] = map[pp.ID]*common.Stream{}
for k, v := range protocolsPartners {
pts[k] = v
}
if s.Streams[proto][peerID.ID] == nil {
// should create a very temp stream
ctxTTL, err := context.WithTimeout(context.Background(), 60*time.Second)
if err == nil {
if isAPartner {
ctxTTL = context.Background()
}
if s.Host.Network().Connectedness(peerID.ID) != network.Connected {
_ = s.Host.Connect(ctxTTL, *peerID)
str, err := s.Host.NewStream(ctxTTL, peerID.ID, ProtocolHeartbeatPartner)
if err == nil {
s.Streams[ProtocolHeartbeatPartner][peerID.ID] = &common.Stream{
DID: did,
Stream: str,
Expiry: time.Now().UTC().Add(5 * time.Second),
}
str2, err := s.Host.NewStream(ctxTTL, peerID.ID, proto)
if err == nil {
s.Streams[proto][peerID.ID] = &common.Stream{
DID: did,
Stream: str2,
Expiry: time.Now().UTC().Add(5 * time.Second),
}
}
}
if s.Streams, err = common.TempStream(s.Host, *peerID, proto, did, s.Streams, pts, &s.Mu); err != nil {
return nil, errors.New("no stream available for protocol " + fmt.Sprintf("%v", proto) + " from PID " + peerID.ID.String())
}
}
return errors.New("no stream available for protocol " + fmt.Sprintf("%v", proto) + " from PID " + peerID.ID.String())
}
if self, err := oclib.GetMySelf(); err != nil {
return nil, err
} else {
stream := s.Streams[proto][peerID.ID]
enc := json.NewEncoder(stream.Stream)
evt := common.NewEvent(name, peerID.ID.String(), dt, user, payload)
if err := enc.Encode(evt); err != nil {
evt := common.NewEvent(string(proto), self.PeerID, dt, user, payload)
fmt.Println("SEND EVENT ", peerID, proto, evt.From, evt.DataType, evt.Timestamp)
if err := json.NewEncoder(stream.Stream).Encode(evt); err != nil {
stream.Stream.Close()
logger.Err(err)
return nil
return nil, err
}
return nil
if protocolInfo, ok := protocols[proto]; ok && protocolInfo.WaitResponse {
go s.readLoop(stream, peerID.ID, proto, &common.ProtocolInfo{PersistantStream: true})
}
return stream, nil
}
}

View File

@@ -3,7 +3,8 @@ package stream
import (
"context"
"encoding/json"
"fmt"
"errors"
"io"
"oc-discovery/conf"
"oc-discovery/daemons/node/common"
"strings"
@@ -21,18 +22,32 @@ import (
"github.com/libp2p/go-libp2p/core/protocol"
)
const ProtocolConsidersResource = "/opencloud/resource/considers/1.0"
const ProtocolMinioConfigResource = "/opencloud/minio/config/1.0"
const ProtocolAdmiraltyConfigResource = "/opencloud/admiralty/config/1.0"
const ProtocolSearchResource = "/opencloud/resource/search/1.0"
const ProtocolCreateResource = "/opencloud/resource/create/1.0"
const ProtocolUpdateResource = "/opencloud/resource/update/1.0"
const ProtocolDeleteResource = "/opencloud/resource/delete/1.0"
const ProtocolSendPlanner = "/opencloud/resource/planner/1.0"
const ProtocolVerifyResource = "/opencloud/resource/verify/1.0"
const ProtocolHeartbeatPartner = "/opencloud/resource/heartbeat/partner/1.0"
var protocols = []protocol.ID{
ProtocolSearchResource,
ProtocolCreateResource,
ProtocolUpdateResource,
ProtocolDeleteResource,
var protocols = map[protocol.ID]*common.ProtocolInfo{
ProtocolConsidersResource: {WaitResponse: false, TTL: 3 * time.Second},
ProtocolSendPlanner: {WaitResponse: true, TTL: 24 * time.Hour},
ProtocolSearchResource: {WaitResponse: true, TTL: 1 * time.Minute},
ProtocolVerifyResource: {WaitResponse: true, TTL: 1 * time.Minute},
ProtocolMinioConfigResource: {WaitResponse: true, TTL: 1 * time.Minute},
ProtocolAdmiraltyConfigResource: {WaitResponse: true, TTL: 1 * time.Minute},
}
var protocolsPartners = map[protocol.ID]*common.ProtocolInfo{
ProtocolCreateResource: {TTL: 3 * time.Second},
ProtocolUpdateResource: {TTL: 3 * time.Second},
ProtocolDeleteResource: {TTL: 3 * time.Second},
}
type StreamService struct {
@@ -41,8 +56,7 @@ type StreamService struct {
Node common.DiscoveryPeer
Streams common.ProtocolStream
maxNodesConn int
mu sync.Mutex
// Stream map[protocol.ID]map[pp.ID]*daemons.Stream
Mu sync.RWMutex
}
func InitStream(ctx context.Context, h host.Host, key pp.ID, maxNode int, node common.DiscoveryPeer) (*StreamService, error) {
@@ -54,119 +68,78 @@ func InitStream(ctx context.Context, h host.Host, key pp.ID, maxNode int, node c
Streams: common.ProtocolStream{},
maxNodesConn: maxNode,
}
logger.Info().Msg("handle to partner heartbeat protocol...")
service.Host.SetStreamHandler(ProtocolHeartbeatPartner, service.HandlePartnerHeartbeat)
for proto := range protocols {
service.Host.SetStreamHandler(proto, service.HandleResponse)
}
logger.Info().Msg("connect to partners...")
service.connectToPartners() // we set up a stream
go service.StartGC(30 * time.Second)
go service.StartGC(8 * time.Second)
return service, nil
}
func (s *StreamService) HandlePartnerHeartbeat(stream network.Stream) {
pid, hb, err := common.CheckHeartbeat(s.Host, stream, s.maxNodesConn)
if err != nil {
return
func (s *StreamService) HandleResponse(stream network.Stream) {
s.Mu.Lock()
defer s.Mu.Unlock()
stream.Protocol()
if s.Streams[stream.Protocol()] == nil {
s.Streams[stream.Protocol()] = map[pp.ID]*common.Stream{}
}
s.mu.Lock()
defer s.mu.Unlock()
expiry := 1 * time.Minute
if s.Streams[ProtocolHeartbeatPartner] == nil {
s.Streams[ProtocolHeartbeatPartner] = map[pp.ID]*common.Stream{}
if protocols[stream.Protocol()] != nil {
expiry = protocols[stream.Protocol()].TTL
} else if protocolsPartners[stream.Protocol()] != nil {
expiry = protocolsPartners[stream.Protocol()].TTL
}
streams := s.Streams[ProtocolHeartbeatPartner]
// if record already seen update last seen
if rec, ok := streams[*pid]; ok {
rec.DID = hb.DID
rec.Expiry = time.Now().UTC().Add(2 * time.Minute)
} else { // if not in stream ?
pid := stream.Conn().RemotePeer()
ai, err := pp.AddrInfoFromP2pAddr(stream.Conn().RemoteMultiaddr())
if err == nil {
s.ConnectToPartner(pid, ai)
s.Streams[stream.Protocol()][stream.Conn().RemotePeer()] = &common.Stream{
Stream: stream,
Expiry: time.Now().UTC().Add(expiry + 1*time.Minute),
}
}
go s.StartGC(30 * time.Second)
go s.readLoop(s.Streams[stream.Protocol()][stream.Conn().RemotePeer()],
stream.Conn().RemotePeer(),
stream.Protocol(), protocols[stream.Protocol()])
}
func (s *StreamService) connectToPartners() error {
peers, err := s.searchPeer(fmt.Sprintf("%v", peer.PARTNER.EnumIndex()))
if err != nil {
return err
}
for _, p := range peers {
ad, err := pp.AddrInfoFromString(p.StreamAddress)
if err != nil {
continue
}
pid, err := pp.Decode(p.PeerID)
if err != nil {
continue
}
s.ConnectToPartner(pid, ad)
// heartbeat your partner.
}
for _, proto := range protocols {
logger := oclib.GetLogger()
// Register handlers for partner resource protocols (create/update/delete).
// Connections to partners happen on-demand via TempStream when needed.
for proto, info := range protocolsPartners {
f := func(ss network.Stream) {
if s.Streams[proto] == nil {
s.Streams[proto] = map[pp.ID]*common.Stream{}
}
s.Streams[proto][ss.Conn().RemotePeer()] = &common.Stream{
Stream: ss,
Expiry: time.Now().UTC().Add(2 * time.Minute),
Expiry: time.Now().UTC().Add(10 * time.Second),
}
s.readLoop(s.Streams[proto][ss.Conn().RemotePeer()])
go s.readLoop(s.Streams[proto][ss.Conn().RemotePeer()], ss.Conn().RemotePeer(), proto, info)
}
fmt.Println("SetStreamHandler", proto)
logger.Info().Msg("SetStreamHandler " + string(proto))
s.Host.SetStreamHandler(proto, f)
}
// TODO if handle... from partner then HeartBeat back
return nil
}
func (s *StreamService) ConnectToPartner(pid pp.ID, ad *pp.AddrInfo) {
logger := oclib.GetLogger()
for _, proto := range protocols {
f := func(ss network.Stream) {
if s.Streams[proto] == nil {
s.Streams[proto] = map[pp.ID]*common.Stream{}
}
s.Streams[proto][pid] = &common.Stream{
Stream: ss,
Expiry: time.Now().UTC().Add(2 * time.Minute),
}
s.readLoop(s.Streams[proto][pid])
}
if s.Host.Network().Connectedness(ad.ID) != network.Connected {
if err := s.Host.Connect(context.Background(), *ad); err != nil {
logger.Err(err)
continue
}
}
s.Streams = common.AddStreamProtocol(nil, s.Streams, s.Host, proto, pid, s.Key, false, &f)
}
common.SendHeartbeat(context.Background(), ProtocolHeartbeatPartner, conf.GetConfig().Name,
s.Host, s.Streams, []*pp.AddrInfo{ad}, 20*time.Second)
}
func (s *StreamService) searchPeer(search string) ([]*peer.Peer, error) {
/* TODO FOR TEST ONLY A VARS THAT DEFINE ADDRESS... deserialize */
ps := []*peer.Peer{}
if conf.GetConfig().PeerIDS != "" {
for _, peerID := range strings.Split(conf.GetConfig().PeerIDS, ",") {
ppID := strings.Split(peerID, ":")
ppID := strings.Split(peerID, "/")
ps = append(ps, &peer.Peer{
AbstractObject: utils.AbstractObject{
UUID: uuid.New().String(),
Name: ppID[1],
},
PeerID: ppID[1],
StreamAddress: "/ip4/127.0.0.1/tcp/" + ppID[0] + "/p2p/" + ppID[1],
State: peer.ONLINE,
PeerID: ppID[len(ppID)-1],
StreamAddress: peerID,
Relation: peer.PARTNER,
})
}
}
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
peers := access.Search(nil, search, false)
for _, p := range peers.Data {
@@ -194,15 +167,11 @@ func (s *StreamService) StartGC(interval time.Duration) {
}
func (s *StreamService) gc() {
s.mu.Lock()
defer s.mu.Unlock()
s.Mu.Lock()
defer s.Mu.Unlock()
now := time.Now().UTC()
if s.Streams[ProtocolHeartbeatPartner] == nil {
s.Streams[ProtocolHeartbeatPartner] = map[pp.ID]*common.Stream{}
}
streams := s.Streams[ProtocolHeartbeatPartner]
for pid, rec := range streams {
for pid, rec := range s.Streams[ProtocolHeartbeatPartner] {
if now.After(rec.Expiry) {
for _, sstreams := range s.Streams {
if sstreams[pid] != nil {
@@ -214,28 +183,52 @@ func (s *StreamService) gc() {
}
}
func (ps *StreamService) readLoop(s *common.Stream) {
func (ps *StreamService) readLoop(s *common.Stream, id pp.ID, proto protocol.ID, protocolInfo *common.ProtocolInfo) {
defer s.Stream.Close()
defer func() {
ps.Mu.Lock()
defer ps.Mu.Unlock()
delete(ps.Streams[proto], id)
}()
loop := true
if !protocolInfo.PersistantStream && !protocolInfo.WaitResponse { // 2 sec is enough... to wait a response
time.AfterFunc(2*time.Second, func() {
loop = false
})
}
for {
if !loop {
break
}
var evt common.Event
if err := json.NewDecoder(s.Stream).Decode(&evt); err != nil {
s.Stream.Close()
// Any decode error (EOF, reset, malformed JSON) terminates the loop;
// continuing on a dead/closed stream creates an infinite spin.
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
strings.Contains(err.Error(), "reset") ||
strings.Contains(err.Error(), "closed") ||
strings.Contains(err.Error(), "too many connections") {
return
}
continue
}
ps.handleEvent(evt.Type, &evt)
if protocolInfo.WaitResponse && !protocolInfo.PersistantStream {
break
}
}
}
func (abs *StreamService) FilterPeer(peerID string, search string) *dbs.Filters {
id, err := oclib.GetMySelf()
p, err := oclib.GetMySelf()
if err != nil {
return nil
}
filter := map[string][]dbs.Filter{
"creator_id": {{Operator: dbs.EQUAL.String(), Value: id}}, // is my resource...
"abstractinstanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: p.GetID()}}, // is my resource...
"": {{Operator: dbs.OR.String(), Value: &dbs.Filters{
Or: map[string][]dbs.Filter{
"abstractobject.access_mode": {{Operator: dbs.EQUAL.String(), Value: 1}}, // if public
"abstractinstanciatedresource.abstractresource.abstractobject.access_mode": {{Operator: dbs.EQUAL.String(), Value: 1}}, // if public
"abstractinstanciatedresource.instances": {{Operator: dbs.ELEMMATCH.String(), Value: &dbs.Filters{ // or got a partners instances
And: map[string][]dbs.Filter{
"resourceinstance.partnerships": {{Operator: dbs.ELEMMATCH.String(), Value: &dbs.Filters{
@@ -248,15 +241,15 @@ func (abs *StreamService) FilterPeer(peerID string, search string) *dbs.Filters
},
}}},
}
if search != "" {
filter[" "] = []dbs.Filter{{Operator: dbs.OR.String(), Value: &dbs.Filters{
Or: map[string][]dbs.Filter{ // filter by like name, short_description, description, owner, url if no filters are provided
"abstractintanciatedresource.abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractintanciatedresource.abstractresource.type": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractintanciatedresource.abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractintanciatedresource.abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractintanciatedresource.abstractresource.owners.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractintanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: search}},
"abstractinstanciatedresource.abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractinstanciatedresource.abstractresource.type": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractinstanciatedresource.abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractinstanciatedresource.abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
"abstractinstanciatedresource.abstractresource.owners.name": {{Operator: dbs.LIKE.String(), Value: search}},
},
}}}
}

View File

@@ -1,23 +1,33 @@
#!/bin/bash
IMAGE_BASE_NAME="oc-discovery"
DOCKERFILE_PATH="."
for i in {0..3}; do
docker network create \
--subnet=172.40.0.0/24 \
discovery
for i in $(seq ${1:-0} ${2:-3}); do
NUM=$((i + 1))
PORT=$((4000 + $NUM))
IMAGE_NAME="${IMAGE_BASE_NAME}:${NUM}"
echo "▶ Building image ${IMAGE_NAME} with CONF_NUM=${NUM}"
docker build \
--build-arg CONF_NUM=${NUM} \
-t ${IMAGE_NAME} \
-t "${IMAGE_BASE_NAME}_${NUM}" \
${DOCKERFILE_PATH}
docker kill "${IMAGE_BASE_NAME}_${NUM}" || true
docker rm "${IMAGE_BASE_NAME}_${NUM}" || true
echo "▶ Running container ${IMAGE_NAME} on port ${PORT}:${PORT}"
docker run -d \
--network="${3:-oc}" \
-p ${PORT}:${PORT} \
--name "${IMAGE_BASE_NAME}_${NUM}" \
${IMAGE_NAME}
"${IMAGE_BASE_NAME}_${NUM}"
docker network connect --ip "172.40.0.${NUM}" discovery "${IMAGE_BASE_NAME}_${NUM}"
done

View File

@@ -4,5 +4,5 @@
"NATS_URL": "nats://nats:4222",
"NODE_MODE": "indexer",
"NODE_ENDPOINT_PORT": 4002,
"INDEXER_ADDRESSES": "/ip4/oc-discovery1/tcp/4001/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
"INDEXER_ADDRESSES": "/ip4/172.40.0.1/tcp/4001/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
}

View File

@@ -4,5 +4,5 @@
"NATS_URL": "nats://nats:4222",
"NODE_MODE": "node",
"NODE_ENDPOINT_PORT": 4003,
"INDEXER_ADDRESSES": "/ip4/oc-discovery2/tcp/4002/p2p/12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u"
"INDEXER_ADDRESSES": "/ip4/172.40.0.2/tcp/4002/p2p/12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u"
}

View File

@@ -4,6 +4,5 @@
"NATS_URL": "nats://nats:4222",
"NODE_MODE": "node",
"NODE_ENDPOINT_PORT": 4004,
"INDEXER_ADDRESSES": "/ip4/oc-discovery1/tcp/4001/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu",
"PEER_IDS": "/ip4/oc-discovery3/tcp/4003/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
"INDEXER_ADDRESSES": "/ip4/172.40.0.1/tcp/4001/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,362 @@
================================================================================
OC-DISCOVERY : TARGET ARCHITECTURE — DHT NETWORK WITHOUT NATIVES
Long-term evolution vision, derived from a comparative analysis
================================================================================
Written from the analysis of the current architecture and from the comparative
discussion of Tapestry, Kademlia, EigenTrust and distributed reputation
systems.
Reference: DECENTRALIZED_SYSTEMS_COMPARISON.txt §9
================================================================================
1. MOTIVATION
================================================================================
The current architecture (node → indexer → native indexer) is robust and well
suited to an early phase of the network. Its limits at scale are:
- Static pool of natives at startup → dependency on configuration
- Local cache of natives = single point of failure (loss = empty pool)
- Blocking inter-native consensus (~7s) triggered at every node bootstrap
- O(N indexers) state per native → grows linearly with the network
- Structurally privileged nodes → relative SPOFs
The target described here removes the notion of native indexer as an
architectural tier. The network becomes flat: indexers and nodes are actors
of the same nature, differentiated only by their voluntary role.
================================================================================
2. FUNDAMENTAL PRINCIPLES
================================================================================
P1. No node is structurally privileged.
P2. Trust is a product of time and verification, not of an arbiter.
P3. An actor's claims can be verified independently by any peer.
P4. Reputation emerges from collective behavior, not from central reporting.
P5. The DHT is neutral infrastructure — it stores facts, not judgments.
P6. Static configuration no longer exists at runtime — only at bootstrap.
================================================================================
3. ROLES
================================================================================
3.1 Node
--------
Consumer of the network. Starts, selects a pool of indexers through the DHT,
heartbeats its indexers, accumulates scores locally. Publishes nothing in
routine operation. Takes part in consensus challenges on demand.
3.2 Indexer
-----------
Voluntary actor. Registers itself in the DHT at birth, maintains its record,
serves node traffic (heartbeat, Publish, Get). Declares its metrics in
every heartbeat response. Maintains a score aggregated from its connected nodes.
Difference from the current design: the indexer no longer has any link to a native.
It is autonomous. Its existence in the network is proven by its DHT record
and by the nodes that contact it directly.
3.3 Infrastructure DHT node (ex-native)
----------------------------------------
Any sufficiently stable node can maintain the DHT without being an
indexer. It is a configuration, not an architectural type: `dht_mode: server`.
These nodes maintain the Kademlia k-buckets and store the indexer
records. They do not see node↔indexer traffic and do not orchestrate it.
================================================================================
4. NODE BOOTSTRAP
================================================================================
4.1 Entering the network
-------------------------
The node starts with 1 to 3 addresses of known DHT nodes (bootstrap peers).
These are the only static pieces of information needed. These peers have no
semantic role — they only serve to enter the DHT overlay.
4.2 Discovering the indexer pool
----------------------------------
Node → DHT.FindProviders(hash("/opencloud/indexers"))
     → receives a list of N candidates with their records
Initial pool selection:
1. Latency filter   : ping < threshold → real network proximity
2. Fill-rate filter : prefer less loaded indexers
3. Weighted draw    : probability ∝ (1 - fill_rate), curve w(F) = F×(1-F)
       indexer at 20% load → very likely
       indexer at 80% load → unlikely
4. Diversity filter : a different /24 subnet for each pool entry
No consensus is needed at this stage. The node starts with a low tolerance
(see §7) — it accepts imperfect indexers and evaluates them over time.
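A minimal Go sketch of the weighted draw in step 3, for illustration only. Since
the text states both "probability ∝ (1 - fill_rate)" and the curve w(F) = F×(1-F),
the weight function is passed in as a parameter; dhtCandidate and pickWeighted are
placeholder names, not the current oc-discovery API.

    package bootstrap // illustrative sketch, not part of oc-discovery

    import "math/rand"

    type dhtCandidate struct {
        Multiaddr string
        FillRate  float64 // 0..1, taken from the candidate's DHT record
    }

    // pickWeighted draws one candidate with probability proportional to weight(FillRate).
    func pickWeighted(cands []dhtCandidate, weight func(fill float64) float64) *dhtCandidate {
        ws := make([]float64, len(cands))
        total := 0.0
        for i, c := range cands {
            ws[i] = weight(c.FillRate)
            total += ws[i]
        }
        if total <= 0 {
            return nil
        }
        r := rand.Float64() * total
        for i := range cands {
            r -= ws[i]
            if r <= 0 {
                return &cands[i]
            }
        }
        return &cands[len(cands)-1]
    }

    // Example weights matching the two formulations above:
    //   lessLoaded := func(f float64) float64 { return 1 - f }       // probability ∝ (1 - fill_rate)
    //   midLoaded  := func(f float64) float64 { return f * (1 - f) } // curve w(F) = F×(1-F)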
================================================================================
5. INDEXER REGISTRATION IN THE DHT
================================================================================
At birth, the indexer publishes its DHT record:
key   : hash("/opencloud/indexers")        ← fixed key, known by everyone
value : {
  multiaddr  : <network address>,
  region     : <subnet /24>,
  capacity   : <maxNodesConn>,
  fill_rate  : <float 0-1>,                ← self-declared, verifiable
  peer_count : <int>,                      ← self-declared, verifiable
  peers      : [hash(nodeID1), ...],       ← hashed list of connected nodes
  born_at    : <timestamp>,
  sig        : <indexer key signature>,    ← non-forgeable (PSK context)
}
The record is refreshed every ~60s (before the TTL expires).
If the indexer goes down: the TTL expires → it disappears from the DHT automatically.
The peer list is hashed for confidentiality but remains verifiable:
a challenger can ask a node directly whether it is connected to this indexer.
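A Go sketch of how this record could be encoded; IndexerRecord, the JSON tags and
signingBytes are assumptions for illustration, not the existing oc-discovery schema.

    package record // illustrative sketch, not the existing oc-discovery schema

    import "encoding/json"

    type IndexerRecord struct {
        Multiaddr string   `json:"multiaddr"`  // network address
        Region    string   `json:"region"`     // /24 subnet, used by the diversity filter
        Capacity  int      `json:"capacity"`   // maxNodesConn
        FillRate  float64  `json:"fill_rate"`  // 0..1, self-declared, verifiable
        PeerCount int      `json:"peer_count"` // self-declared, verifiable
        Peers     []string `json:"peers"`      // hash(nodeID) of connected nodes
        BornAt    int64    `json:"born_at"`    // indexer startup timestamp
        Sig       []byte   `json:"sig"`        // signature by the indexer key (PSK context)
    }

    // signingBytes returns the canonical bytes covered by Sig:
    // the record with the signature cleared, JSON-encoded.
    func (r IndexerRecord) signingBytes() ([]byte, error) {
        c := r
        c.Sig = nil
        return json.Marshal(c)
    }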
================================================================================
6. HEARTBEAT PROTOCOL — QUESTION AND ANSWER
================================================================================
The heartbeat becomes bidirectional: the node asks questions, the indexer
answers with its current declarations.
6.1 Structure
-------------
Node → Indexer :
{
  ts        : now,
  challenge : <optional, see §8>
}
Indexer → Node :
{
  ts                 : now,
  fill_rate          : 0.42,
  peer_count         : 87,
  cached_score       : 0.74,  ← score aggregated from all of its connected nodes
  challenge_response : {...}  ← if a challenge was present in the request
}
A normal heartbeat (without a challenge) is almost identical in weight to the current one.
The indexer's cached_score is updated progressively from the feedback it receives.
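A Go sketch of these two messages as they could travel over the heartbeat stream;
the type names, JSON encoding and challenge placeholders are assumptions, not the
current wire format.

    // Illustrative wire shapes only; field names mirror the text above.
    type HeartbeatQuestion struct {
        TS        int64      `json:"ts"`
        Challenge *Challenge `json:"challenge,omitempty"` // optional, see §8
    }

    type HeartbeatAnswer struct {
        TS                int64              `json:"ts"`
        FillRate          float64            `json:"fill_rate"`
        PeerCount         int                `json:"peer_count"`
        CachedScore       float64            `json:"cached_score"` // aggregated from connected nodes
        ChallengeResponse *ChallengeResponse `json:"challenge_response,omitempty"`
    }

    // Placeholders for the §8 verification payloads.
    type Challenge struct {
        PeerHash string `json:"peer_hash"` // one entry of the indexer's hashed peer list
    }
    type ChallengeResponse struct {
        Found bool `json:"found"`
    }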
6.2 The indexer's cached_score
---------------------------------
The indexer aggregates the scores its connected nodes communicate to it
(implicitly, through the fact that they stay connected, or explicitly during
a consensus). This score gives it a view of its own network quality.
A node can compare its local score for the indexer with the declared cached_score.
A strong divergence is a warning signal.
Local node score : 0.40  ← this indexer is mediocre for me
Cached score     : 0.91  ← it claims to be excellent globally
→ triggers a verification challenge
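A small sketch of that trigger; the 0.3 divergence threshold is purely
illustrative, the text does not fix a value.

    // shouldChallenge reports whether the gap between our local score for an
    // indexer and its self-declared cached_score is large enough to trigger a
    // §8 verification challenge. The 0.3 threshold is illustrative only.
    func shouldChallenge(localScore, cachedScore float64) bool {
        d := localScore - cachedScore
        if d < 0 {
            d = -d
        }
        return d > 0.3
    }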
================================================================================
7. PROGRESSIVE TRUST MODEL
================================================================================
7.1 Node life cycle
---------------------------
Birth
→ low tolerance: accepts almost any indexer from the DHT
→ low switching cost: little accumulated context
→ minScore ≈ 20% (existing dynamicMinScore, kept)
A few hours
→ uptime accumulates on every known indexer
→ scores stabilize
→ replacement threshold rises progressively
Long term (days)
→ stable pool, high confidence in the known indexers
→ switching is costly but triggered on a clear disappointment
→ minScore ≈ 80% (maturity)
7.2 Underlying model: implicit beta distribution
------------------------------------------------------
α = accumulated successes (heartbeats OK, probes OK, challenges passed)
β = accumulated failures (timeouts, failed probes, failed challenges)
confidence = α / (α + β)
New indexer          : α=0, β=0 → neutral prior, low tolerance
After 10 days        : high α → stable confidence, high switch threshold
Clear disappointment : β rises → confidence drops → switch triggered
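A Go sketch of this counter; the small symmetric prior that keeps a brand-new
indexer at a neutral 0.5 instead of dividing by zero is an assumption on top of
the text.

    // betaTrust accumulates successes (α) and failures (β) for one indexer and
    // exposes confidence = α / (α + β), smoothed with a symmetric prior of 1.
    type betaTrust struct {
        alpha float64 // heartbeats OK, probes OK, challenges passed
        beta  float64 // timeouts, failed probes, failed challenges
    }

    func (b *betaTrust) success() { b.alpha++ }
    func (b *betaTrust) failure() { b.beta++ }

    func (b *betaTrust) confidence() float64 {
        const prior = 1.0
        return (b.alpha + prior) / (b.alpha + b.beta + 2*prior)
    }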
7.3 What "disappointing" means
--------------------------------
Heartbeat rate        → too many timeouts                      → reliability drops
Bandwidth probe       → falls below the declared value         → degradation or lie
Actual fill rate      → higher than declared                   → overloaded or dishonest indexer
Failed challenge      → declared peer absent from the network  → invalid claim
Latency               → progressive drift                      → degraded network quality
Inflated cached_score → strong divergence from the local score → suspicion
================================================================================
8. CLAIM VERIFICATION — THREE LAYERS
================================================================================
8.1 Layer 1: passive (every heartbeat, 60s)
-----------------------------------------------
Automatic measurements, zero extra cost.
- heartbeat RTT        → direct latency
- declared fill_rate   → tiny payload in the response
- declared peer_count  → tiny payload
- indexer cached_score → compared with the local score
8.2 Layer 2: active sampling (1 heartbeat out of N)
--------------------------------------------------
Periodic, asynchronous, lightweight checks.
Every 5 HB (~5min)  : spot-check 1 random peer (see §8.4)
Every 10 HB (~10min): subnet diversity check (light DHT lookups)
Every 15 HB (~15min): bandwidth probe (real transfer, dedicated protocol)
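A sketch of how a node could drive this cadence from its heartbeat counter; the
three hooks are placeholder stubs, not existing functions.

    // onHeartbeatTick runs the layer-2 checks at the cadence described above.
    func onHeartbeatTick(hbCount int) {
        if hbCount%5 == 0 {
            go spotCheckRandomPeer() // §8.4: ask one declared peer directly
        }
        if hbCount%10 == 0 {
            go checkSubnetDiversity() // light DHT lookups on the declared /24 regions
        }
        if hbCount%15 == 0 {
            go runBandwidthProbe() // real transfer over a dedicated protocol
        }
    }

    func spotCheckRandomPeer()  {}
    func checkSubnetDiversity() {}
    func runBandwidthProbe()    {}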
8.3 Layer 3: consensus (event-driven)
-----------------------------------------
Triggered on: admission of a new indexer into the pool, or detected suspicion.
Node selects a verifiable claim from the target indexer X
Node verifies it itself
Node asks its trusted indexers: "verify this claim about X"
Each indexer verifies independently
Results converge → X is honest  → admission
Results diverge  → X is suspect → rejection or probation
The consensus is lightweight: a few out-of-band contacts, no blocking round.
It is not continuous — it is event-driven.
8.4 Out-of-band verification (no DHT writes by nodes)
----------------------------------------------------------------
Nodes do NOT publish continuous contact records in the DHT.
This avoids N×M records to refresh (a high DHT cost at scale).
Instead, during a challenge:
The challenger selects 2-3 peers from the peer list declared by X
→ contacts those peers directly: "are you connected to indexer X?"
→ direct answer (out-of-band, not via the DHT)
→ verification without any DHT write
The indexer cannot make peers that are not connected to it answer "yes".
The verification is non-falsifiable and costs nothing on the DHT.
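A Go sketch of one such spot-check over a libp2p stream. The protocol ID reuses
the /opencloud/witness/1.0 handler mentioned in the initialization diagram, but
the request/response JSON shape is an assumption.

    package verify // illustrative sketch

    import (
        "context"
        "encoding/json"

        "github.com/libp2p/go-libp2p/core/host"
        "github.com/libp2p/go-libp2p/core/peer"
        "github.com/libp2p/go-libp2p/core/protocol"
    )

    // isConnectedTo asks one witness (a peer taken from X's declared peer list)
    // whether it is really connected to indexer x. The check is out-of-band:
    // a direct stream, no DHT write.
    func isConnectedTo(ctx context.Context, h host.Host, witness peer.ID, x peer.ID) (bool, error) {
        s, err := h.NewStream(ctx, witness, protocol.ID("/opencloud/witness/1.0"))
        if err != nil {
            return false, err
        }
        defer s.Close()
        if err := json.NewEncoder(s).Encode(map[string]string{"indexer": x.String()}); err != nil {
            return false, err
        }
        var resp struct {
            Connected bool `json:"connected"`
        }
        if err := json.NewDecoder(s).Decode(&resp); err != nil {
            return false, err
        }
        return resp.Connected, nil
    }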
8.5 Why X cannot cheat
------------------------------------
X cannot coordinate different answers towards simultaneous challengers.
Each challenger independently contacts the same peers.
If X lies about its peer list:
- Challenger A contacts peer P → "no, not connected to X"
- Challenger B contacts peer P → "no, not connected to X"
- Consensus: X is lying → its score drops at every challenger
- Network effect: X progressively loses its connections
- Its DHT peer list empties → future claims are even less credible
================================================================================
9. NETWORK EFFECT WITHOUT CENTRAL REPORTING
================================================================================
A node that penalizes an indexer sends no "report" to anyone.
Its local actions produce the network effect by aggregation:
The node lowers X's score       → X receives less traffic from this node
The node switches to Y          → X loses a client
The node refuses X's challenges → X can no longer take part in consensus
If 200 nodes do the same:
X loses most of its connections
Its DHT peer list empties (directly contacted peers say "no")
Its cached_score collapses (few nodes remain)
New nodes that see X in the DHT get failed challenges
X is naturally excluded without any central decision
Conversely, an honest indexer sees its scores rise on all of its connected
nodes, its peer list densify, and its challenges pass systematically.
Its reputation is an observable, verifiable product.
================================================================================
10. ARCHITECTURE SUMMARY
================================================================================
DHT        → neutral directory, source of truth for indexer records,
             maintained by any stable node (dht_mode: server)
Indexer    → voluntary actor, registers itself, maintains its claims,
             serves traffic, accumulates its own aggregated score
Node       → consumer, passive scoring + sampling + light consensus,
             progressive trust, adaptive switching
Heartbeat  → 60s metronome + vector of light declarations + optional challenge
Consensus  → event-driven, independent multi-challengers,
             out-of-band verification of DHT claims
Trust      → implicit beta, progressive, switching cost growing with age
Reputation → emerges from collective behavior, no central arbiter
Bootstrap  → 1-3 known DHT peers → the only static configuration needed
================================================================================
11. MIGRATION PATH
================================================================================
Phase 1 (current)
Static natives, dynamic indexer pool, inter-native consensus
→ robust, suited to the early phase
Phase 2 (intermediate)
Dynamic native pool via DHT (bootstrap + gossip)
Same native protocol, only discovery becomes dynamic
→ removes the dependency on static native configuration
→ see DECENTRALIZED_SYSTEMS_COMPARISON.txt §9.2
Phase 3 (target)
The architecture described in this document
Natives disappear as an architectural tier
DHT = infrastructure, indexers = autonomous actors
Scoring and consensus entirely on the node side
→ no privileged node, O(log N) scalability
The Phase 2 → Phase 3 migration is a redesign of the control plane.
The data plane (node↔indexer heartbeat, Publish, Get) is unchanged.
The libp2p primitives (Kademlia DHT, GossipSub) are already present.
================================================================================
12. PROPERTIES OF THE TARGET SYSTEM
================================================================================
Scalability       O(log N) — Kademlia DHT routing
Resilience        No structural SPOF, TTL = the only source of truth
Trust             Progressive, verifiable, emergent
Sybil resistance  PSK — only nodes holding the key can publish
Cold start        Low initial tolerance, progressive ramp-up (existing)
Honesty           Claims verifiable out-of-band, non-falsifiable
Decentralization  No node knows the complete global state
================================================================================

View File

@@ -0,0 +1,64 @@
@startuml
title Node Initialization — Peer A (InitNode)
participant "main (Peer A)" as MainA
participant "Node A" as NodeA
participant "libp2p (Peer A)" as libp2pA
participant "ConnectionGater A" as GaterA
participant "DB Peer A (oc-lib)" as DBA
participant "NATS A" as NATSA
participant "Indexer (shared)" as IndexerA
participant "DHT A" as DHTA
participant "StreamService A" as StreamA
participant "PubSubService A" as PubSubA
MainA -> NodeA: InitNode(isNode=true, isIndexer=false)
NodeA -> NodeA: LoadKeyFromFilePrivate() → priv
NodeA -> NodeA: LoadPSKFromFile() → psk
NodeA -> GaterA: newOCConnectionGater(nil)
NodeA -> libp2pA: New(\n PrivateNetwork(psk),\n Identity(priv),\n ListenAddr: tcp/4001,\n ConnectionGater(gater)\n)
libp2pA --> NodeA: host A (PeerID_A)
NodeA -> GaterA: gater.host = host A
note over GaterA: InterceptSecured (inbound):\n1. DB lookup by peer_id\n → BLACKLIST : refuse\n → found : accept\n2. Not found → DHT sequential check\n (transport-error fallthrough only)
NodeA -> libp2pA: SetStreamHandler(/opencloud/probe/1.0, HandleBandwidthProbe)
NodeA -> libp2pA: SetStreamHandler(/opencloud/witness/1.0, HandleWitnessQuery)
NodeA -> libp2pA: NewGossipSub(ctx, host) → ps (GossipSub)
NodeA -> NodeA: buildRecord() closure\n→ signs fresh PeerRecord (expiry=now+2min)\n embedded in each heartbeat tick
NodeA -> IndexerA: ConnectToIndexers(host, minIndexer=1, maxIndexer=5, buildRecord)
note over IndexerA: Reads IndexerAddresses from config\nAdds seeds → Indexers Directory (IsSeed=true)\nLaunches SendHeartbeat goroutine (20s ticker)
IndexerA -> DHTA: proactive DHT discovery (after 5s warmup)\ninitNodeDHT(h, seeds)\nDiscoverIndexersFromDHT → SelectByFillRate\n→ add to Indexers Directory + NudgeIt()
NodeA -> NodeA: claimInfo(name, hostname)
NodeA -> IndexerA: TempStream /opencloud/record/publish/1.0
NodeA -> IndexerA: stream.Encode(Signed PeerRecord A)
IndexerA -> DHTA: PutValue("/node/"+DID_A, record)
NodeA -> NodeA: StartGC(30s)
NodeA -> StreamA: InitStream(ctx, host, PeerID_A, 1000, nodeA)
StreamA -> StreamA: SetStreamHandler(resource/search, create, update,\n delete, planner, verify, considers)
StreamA --> NodeA: StreamService A
NodeA -> PubSubA: InitPubSub(ctx, host, ps, nodeA, streamA)
PubSubA -> PubSubA: subscribeEvents(PB_SEARCH, timeout=-1)
PubSubA --> NodeA: PubSubService A
NodeA -> NodeA: SubscribeToSearch(ps, callback)
note over NodeA: callback: if evt.From != self\n → GetPeerRecord(evt.From)\n → StreamService.SendResponse
NodeA -> NATSA: ListenNATS(nodeA)
note over NATSA: Subscribes:\nCREATE_RESOURCE → partner on-demand\nPROPALGATION_EVENT → resource propagation
NodeA --> MainA: *Node A is ready
note over NodeA,IndexerA: SendHeartbeat goroutine (permanent, 20s ticker):\nNode → Indexer : Heartbeat{name, PeerID, indexersBinded, need, challenges?, record}\nIndexer → Node : HeartbeatResponse{fillRate, challenges, suggestions, witnesses, suggestMigrate}\nScore updated (7 dimensions), pool managed autonomously
@enduml

View File

@@ -0,0 +1,38 @@
@startuml
title Node Claim — Peer A publish its PeerRecord (claimInfo + publishPeerRecord)
participant "DB Peer A (oc-lib)" as DBA
participant "Node A" as NodeA
participant "Indexer (shared)" as IndexerA
participant "DHT Kademlia" as DHT
participant "NATS A" as NATSA
NodeA -> DBA: DB(PEER).Search(SELF)
DBA --> NodeA: existing peer (DID_A) or new UUID
NodeA -> NodeA: LoadKeyFromFilePrivate() → priv A
NodeA -> NodeA: LoadKeyFromFilePublic() → pub A
NodeA -> NodeA: Build PeerRecord A {\n Name, DID, PubKey,\n PeerID: PeerID_A,\n APIUrl: hostname,\n StreamAddress: /ip4/.../tcp/4001/p2p/PeerID_A,\n NATSAddress, WalletAddress\n}
NodeA -> NodeA: priv.Sign(rec) → signature
NodeA -> NodeA: rec.ExpiryDate = now + 150s
loop For every Node Binded Indexer (Indexer A, B, ...)
NodeA -> IndexerA: TempStream /opencloud/record/publish/1.0
NodeA -> IndexerA: stream.Encode(Signed PeerRecord A)
IndexerA -> IndexerA: Verify signature
IndexerA -> IndexerA: Check PeerID_A heartbeat stream
IndexerA -> DHT: PutValue("/node/"+DID_A, PeerRecord A)
DHT --> IndexerA: ok
end
NodeA -> NodeA: rec.ExtractPeer(DID_A, DID_A, pub A)
NodeA -> NATSA: SetNATSPub(CREATE_RESOURCE, {PEER, Peer A JSON})
NATSA -> DBA: Upsert Peer A (SearchAttr: peer_id)
DBA --> NATSA: ok
NodeA --> NodeA: *peer.Peer A (SELF)
@enduml

View File

@@ -0,0 +1,59 @@
@startuml indexer_heartbeat
title Bidirectional heartbeat node → indexer (7-dimension scoring + challenges)
participant "Node A" as NodeA
participant "Node B" as NodeB
participant "IndexerService" as Indexer
note over NodeA,NodeB: SendHeartbeat goroutine — tick every 20s
== Tick Node A ==
NodeA -> Indexer: NewStream /opencloud/heartbeat/1.0\n(long-lived, reused on subsequent ticks)
NodeA -> Indexer: stream.Encode(Heartbeat{\n name, PeerID_A, timestamp,\n indexersBinded: [addr1, addr2],\n need: maxPool - len(pool),\n challenges: [PeerID_A, PeerID_B], ← batch (every 1-10 HBs)\n challengeDID: "uuid-did-A", ← DHT challenge (every 5 batches)\n record: SignedPeerRecord_A ← expiry=now+2min\n})
Indexer -> Indexer: CheckHeartbeat(stream, maxNodes)\n→ len(Peers()) >= maxNodes → reject
Indexer -> Indexer: HandleHeartbeat → UptimeTracker.RecordHeartbeat()\n→ gap ≤ 2×interval : TotalOnline += gap
Indexer -> Indexer: Republish PeerRecord A to DHT\nDHT.PutValue("/node/"+DID_A, record_A)
== Indexer response → node A ==
Indexer -> Indexer: BuildHeartbeatResponse(remotePeer=A, need, challenges, challengeDID)\n\nfillRate = connected_nodes / MaxNodesConn()\npeerCount = connected_nodes\nmaxNodes = MaxNodesConn()\nbornAt = time of indexer startup\n\nChallenges: for each challenged PeerID\n found = PeerID in StreamRecords[ProtocolHeartbeat]?\n lastSeen = HeartbeatStream.UptimeTracker.LastSeen\n\nDHT challenge:\n DHT.GetValue("/node/"+challengeDID, timeout=3s)\n → dhtFound + dhtPayload\n\nWitnesses: up to 3 AddrInfos of connected nodes\n (addresses known in the Peerstore)\n\nSuggestions: up to `need` indexers from dhtCache\n (async refresh 2min, SelectByFillRate)\n\nSuggestMigrate: fillRate > 80%\n AND node in offload.inBatch (batch ≤ 5, grace 3×HB)
Indexer --> NodeA: stream.Encode(HeartbeatResponse{\n fillRate, peerCount, maxNodes, bornAt,\n challenges, dhtFound, dhtPayload,\n witnesses, suggestions, suggestMigrate\n})
== Score processing on Node A ==
NodeA -> NodeA: score = ensureScore(Indexers, addr_indexer)\nscore.UptimeTracker.RecordHeartbeat()\n\nlatencyScore = max(0, 1 - RTT / (BaseRoundTrip × 10))\n\nBornAt stability:\n bornAt changed? → score.bornAtChanges++\n\nfillConsistency:\n expected = peerCount / maxNodes\n |expected - fillRate| < 10% → fillConsistent++\n\nChallenge PeerID (ground truth own PeerID):\n found=true AND lastSeen < 2×interval → challengeCorrect++\n\nDHT challenge:\n dhtFound=true → dhtSuccess++\n\nWitness query (async):\n go queryWitnesses(h, indexerID, bornAt, fillRate, witnesses, score)
NodeA -> NodeA: score.Score = ComputeNodeSideScore(latencyScore)\n\nScore = (\n 0.20 × uptimeRatio\n+ 0.20 × challengeAccuracy\n+ 0.15 × latencyScore\n+ 0.10 × fillScore ← 1 - fillRate\n+ 0.10 × fillConsistency\n+ 0.15 × witnessConsistency\n+ 0.10 × dhtSuccessRate\n) × 100 × bornAtPenalty\n\nbornAtPenalty = max(0, 1 - 0.30 × bornAtChanges)\nminScore = clamp(20 + 60 × (age.Hours/24), 20, 80)
alt score < minScore\n AND TotalOnline ≥ 2×interval\n AND !IsSeed\n AND len(pool) > 1
NodeA -> NodeA: evictPeer(dir, addr, id, proto)\n→ delete Addr + Score + Stream\ngo TriggerConsensus(h, voters, need)\n or replenishIndexersFromDHT(h, need)
end
alt resp.SuggestMigrate == true AND nonSeedCount >= MinIndexer
alt IsSeed
NodeA -> NodeA: score.IsSeed = false\n(de-stickied, score-based eviction now possible)
else !IsSeed
NodeA -> NodeA: evictPeer → migration accepted
end
end
alt len(resp.Suggestions) > 0
NodeA -> NodeA: handleSuggestions(dir, indexerID, suggestions)\n→ unknown ones added to the Indexers Directory\n→ NudgeIt() if actually added
end
== Tick Node B (concurrent) ==
NodeB -> Indexer: stream.Encode(Heartbeat{PeerID_B, ...})
Indexer -> Indexer: CheckHeartbeat → UptimeTracker → BuildHeartbeatResponse
Indexer --> NodeB: HeartbeatResponse{...}
== Indexer-side GC ==
note over Indexer: GC ticker 30s — gc()\nnow.After(Expiry) where Expiry = lastHBTime + 2min\n→ AfterDelete(pid, name, did) outside the lock\n→ publishNameEvent(NameIndexDelete, ...)\nFillRate recomputed automatically
@enduml
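
A Go sketch of the node-side score formula shown in the diagram above; the weights
and the minScore ramp are taken from the diagram, while the struct and function
names are illustrative, not the actual oc-discovery types.

    package scoring // illustrative sketch of the formula in the diagram

    import (
        "math"
        "time"
    )

    type indexerObservations struct {
        UptimeRatio        float64       // heartbeats seen / heartbeats expected
        ChallengeAccuracy  float64       // correct PeerID challenges / total
        LatencyScore       float64       // max(0, 1 - RTT/(BaseRoundTrip*10))
        FillRate           float64       // declared by the indexer
        FillConsistency    float64       // share of HBs where |peerCount/maxNodes - fillRate| < 10%
        WitnessConsistency float64       // share of witness answers matching the claims
        DHTSuccessRate     float64       // successful DHT challenges / total
        BornAtChanges      int           // times the declared bornAt changed
        Age                time.Duration // how long this node has tracked the indexer
    }

    func computeNodeSideScore(o indexerObservations) (score, minScore float64) {
        fillScore := 1 - o.FillRate
        raw := 0.20*o.UptimeRatio +
            0.20*o.ChallengeAccuracy +
            0.15*o.LatencyScore +
            0.10*fillScore +
            0.10*o.FillConsistency +
            0.15*o.WitnessConsistency +
            0.10*o.DHTSuccessRate
        bornAtPenalty := math.Max(0, 1-0.30*float64(o.BornAtChanges))
        score = raw * 100 * bornAtPenalty
        // Eviction threshold ramps from 20 to 80 over the first 24h of tracking.
        minScore = math.Min(80, math.Max(20, 20+60*(o.Age.Hours()/24)))
        return score, minScore
    }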

View File

@@ -0,0 +1,47 @@
@startuml
title Indexer — Peer A publishing, Peer B publishing (handleNodePublish → DHT)
participant "Node A" as NodeA
participant "Node B" as NodeB
participant "IndexerService (shared)" as Indexer
participant "DHT Kademlia" as DHT
note over NodeA: Start after claimInfo or refresh TTL
par Peer A publish its PeerRecord
NodeA -> Indexer: TempStream /opencloud/record/publish/1.0
NodeA -> Indexer: stream.Encode(PeerRecord A {DID_A, PeerID_A, PubKey_A, Expiry, Sig_A})
Indexer -> Indexer: Verify sig_A (rebuilds a minimal rec, pubKey_A.Verify)
Indexer -> Indexer: Check StreamRecords[Heartbeat][PeerID_A] exists
alt A has an active heartbeat
Indexer -> Indexer: StreamRecord A → DID_A, Record=PeerRecord A, LastSeen=now
Indexer -> DHT: PutValue("/node/"+DID_A, PeerRecord A JSON)
Indexer -> DHT: PutValue("/name/"+name_A, DID_A)
Indexer -> DHT: PutValue("/peer/"+peer_id_A, DID_A)
DHT --> Indexer: ok
else No heartbeat
Indexer -> NodeA: (error "no heartbeat", stream closed)
end
else Peer B publish its PeerRecord
NodeB -> Indexer: TempStream /opencloud/record/publish/1.0
NodeB -> Indexer: stream.Encode(PeerRecord B {DID_B, PeerID_B, PubKey_B, Expiry, Sig_B})
Indexer -> Indexer: Verify sig_B
Indexer -> Indexer: Check StreamRecords[Heartbeat][PeerID_B] exists
alt B has an active heartbeat
Indexer -> Indexer: StreamRecord B → DID_B, Record=PeerRecord B, LastSeen=now
Indexer -> DHT: PutValue("/node/"+DID_B, PeerRecord B JSON)
Indexer -> DHT: PutValue("/name/"+name_B, DID_B)
Indexer -> DHT: PutValue("/peer/"+peer_id_B, DID_B)
DHT --> Indexer: ok
else No heartbeat
Indexer -> NodeB: (error "no heartbeat", stream closed)
end
end par
note over DHT: DHT now holds\n"/node/DID_A" and "/node/DID_B"
@enduml
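
A Go sketch of the three DHT writes shown in this diagram, assuming a
go-libp2p-kad-dht instance whose validator accepts the /node, /name and /peer
namespaces; the function name and error handling are illustrative.

    package publish // illustrative sketch

    import (
        "context"

        dht "github.com/libp2p/go-libp2p-kad-dht"
    )

    // publishRecord performs the three writes shown in the diagram: the signed
    // PeerRecord under /node/<DID>, plus two secondary indexes that map a name
    // and a libp2p PeerID back to the DID. recordJSON is the JSON-encoded record.
    func publishRecord(ctx context.Context, kad *dht.IpfsDHT, did, name, peerID string, recordJSON []byte) error {
        if err := kad.PutValue(ctx, "/node/"+did, recordJSON); err != nil {
            return err
        }
        if err := kad.PutValue(ctx, "/name/"+name, []byte(did)); err != nil {
            return err
        }
        return kad.PutValue(ctx, "/peer/"+peerID, []byte(did))
    }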

View File

@@ -0,0 +1,51 @@
@startuml
title Indexer — Peer A discover Peer B (GetPeerRecord + handleNodeGet)
participant "NATS A" as NATSA
participant "DB Pair A (oc-lib)" as DBA
participant "Node A" as NodeA
participant "IndexerService (partagé)" as Indexer
participant "DHT Kademlia" as DHT
participant "NATS A (retour)" as NATSA2
note over NodeA: Trigger : NATS PB_SEARCH PEER\nor callback SubscribeToSearch
NodeA -> DBA: (PEER).Search(DID_B or PeerID_B)
DBA --> NodeA: Local Peer B (if known) → resolve DID_B + PeerID_B\nor use the search value
loop For every Peer A Binded Indexer
NodeA -> Indexer: TempStream /opencloud/record/get/1.0 -> streamAI
NodeA -> Indexer: streamAI.Encode(GetValue{Key: DID_B, PeerID: PeerID_B})
Indexer -> Indexer: key = "/node/" + DID_B
Indexer -> DHT: SearchValue(ctx 10s, "/node/"+DID_B)
DHT --> Indexer: byte channel (PeerRecord B)
loop For every result from the DHT
Indexer -> Indexer: read → PeerRecord B
alt PeerRecord.PeerID == PeerID_B
Indexer -> Indexer: resp.Found=true, resp.Records[PeerID_B]=PeerRecord B
Indexer -> Indexer: StreamRecord B.LastSeen = now (if active heartbeat)
end
end
Indexer -> NodeA: streamAI.Encode(GetResponse{Found:true, Records:{PeerID_B: PeerRecord B}})
end
loop For every PeerRecord found
NodeA -> NodeA: rec.Verify() → valid B signature
NodeA -> NodeA: rec.ExtractPeer(ourDID_A, DID_B, pubKey_B)
alt ourDID_A == DID_B (it is our own entry)
note over NodeA: Republish to refresh TTL
NodeA -> Indexer: publishPeerRecord(rec) [refresh 2 min]
end
NodeA -> NATSA2: SetNATSPub(CREATE_RESOURCE, {PEER, Peer B JSON,\nSearchAttr:"peer_id"})
NATSA2 -> DBA: Upsert Peer B in DB A
DBA --> NATSA2: ok
end
NodeA --> NodeA: []*peer.Peer → [Peer B]
@enduml

View File

@@ -0,0 +1,49 @@
@startuml native_registration
title Native Indexer — Indexer Subscription (StartNativeRegistration)
participant "Indexer A" as IndexerA
participant "Indexer B" as IndexerB
participant "Native Indexer" as Native
participant "DHT Kademlia" as DHT
participant "GossipSub (oc-indexer-registry)" as PubSub
note over IndexerA,IndexerB: At start + every 60s (RecommendedHeartbeatInterval)\\nStartNativeRegistration → RegisterWithNative
par Indexer A subscribe
IndexerA -> IndexerA: fillRateFn()\\n= len(StreamRecords[HB]) / maxNodes
IndexerA -> IndexerA: Build IndexerRegistration{\\n PeerID_A, Addr_A,\\n Timestamp=now.UnixNano(),\\n FillRate=fillRateFn(),\\n PubKey, Signature\\n}\\nreg.Sign(h)
IndexerA -> Native: NewStream /opencloud/native/subscribe/1.0
IndexerA -> Native: stream.Encode(IndexerRegistration A)
Native -> Native: reg.Verify() — verify signature
Native -> Native: liveIndexerEntry{\\n PeerID_A, Addr_A,\\n ExpiresAt = now + IndexerTTL (90s),\\n FillRate = reg.FillRate,\\n PubKey, Signature\\n}
Native -> Native: liveIndexers[PeerID_A] = entry A
Native -> Native: knownPeerIDs[PeerID_A] = Addr_A
Native -> DHT: PutValue("/indexer/"+PeerID_A, entry A)
DHT --> Native: ok
Native -> PubSub: topic.Publish([]byte(PeerID_A))
note over PubSub: Gossip to other Natives\\n→ they add PeerID_A to knownPeerIDs\\n→ refresh DHT on next tick (30s)
IndexerA -> Native: stream.Close()
else Indexer B subscribe
IndexerB -> IndexerB: fillRateFn() + reg.Sign(h)
IndexerB -> Native: NewStream /opencloud/native/subscribe/1.0
IndexerB -> Native: stream.Encode(IndexerRegistration B)
Native -> Native: reg.Verify() + liveIndexerEntry{FillRate=reg.FillRate, ExpiresAt=now+90s}
Native -> Native: liveIndexers[PeerID_B] = entry B
Native -> DHT: PutValue("/indexer/"+PeerID_B, entry B)
Native -> PubSub: topic.Publish([]byte(PeerID_B))
IndexerB -> Native: stream.Close()
end par
note over Native: liveIndexers = {PeerID_A: {FillRate:0.3}, PeerID_B: {FillRate:0.6}}\\nTTL 90s — IndexerTTL
note over Native: Explicit unsubscribe on stop:\\nUnregisterFromNative → /opencloud/native/unsubscribe/1.0\\nNative closes everything immediately.
@enduml

View File

@@ -0,0 +1,70 @@
@startuml native_get_consensus
title Native — ConnectToNatives : fetch pool + Phase 1 + Phase 2
participant "Node / Indexer\\n(appelant)" as Caller
participant "Native A" as NA
participant "Native B" as NB
participant "Indexer A\\n(stable voter)" as IA
note over Caller: NativeIndexerAddresses configured\\nConnectToNatives() called from ConnectToIndexers
== Step 1 : heartbeat to the native mesh (nativeHeartbeatOnce) ==
Caller -> NA: SendHeartbeat /opencloud/heartbeat/1.0
Caller -> NB: SendHeartbeat /opencloud/heartbeat/1.0
== Step 2: parallel pool fetch (timeout 6s) ==
par fetchIndexersFromNative — parallel
Caller -> NA: NewStream /opencloud/native/indexers/1.0\\nGetIndexersRequest{Count: maxIndexer, From: PeerID}
NA -> NA: reachableLiveIndexers()\\nsorted by w(F) = fillRate×(1−fillRate) desc
NA --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3,IB:0.6}}
else
Caller -> NB: NewStream /opencloud/native/indexers/1.0
NB -> NB: reachableLiveIndexers()
NB --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3,IB:0.6}}
end par
note over Caller: Merge → candidates=[IA,IB]\\nisFallback=false
alt isFallback=true (natives offer themselves as fallback indexers)
note over Caller: resolvePool : avoid consensus\\nadmittedAt = Now (zero)\\nStaticIndexers = {native_addr}
else isFallback=false → Phase 1 + Phase 2
== Phase 1 — clientSideConsensus (timeout 3s/native, 4s total) ==
par Parallel consensus
Caller -> NA: NewStream /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IB]}
NA -> NA: compare with clean liveIndexers
NA --> Caller: ConsensusResponse{Trusted:[IA,IB], Suggestions:[]}
else
Caller -> NB: NewStream /opencloud/native/consensus/1.0
NB --> Caller: ConsensusResponse{Trusted:[IA], Suggestions:[IC]}
end par
note over Caller: IA → 2/2 votes → confirmed ✓\\nIB → 1/2 vote → rejected ✗\\nIC → suggestion → round 2 if confirmed < maxIndexer
alt confirmed < maxIndexer && available suggestions
note over Caller: Round 2 — rechallenge with confirmed + sample(suggestions)\\nclientSideConsensus([IA, IC])
end
note over Caller: admittedAt = time.Now()
== Phase 2 — indexerLivenessVote (timeout 3s/voter, 4s total) ==
note over Caller: Search for stable voters in Subscribed Indexers\\nAdmittedAt != zero && age >= MinStableAge (2min)
alt Stable Voters are available
par Phase 2 parallel
Caller -> IA: NewStream /opencloud/indexer/consensus/1.0\\nIndexerConsensusRequest{Candidates:[IA]}
IA -> IA: StreamRecords[ProtocolHB][candidate]\\ntime.Since(LastSeen) <= 120s && LastScore >= 30.0
IA --> Caller: IndexerConsensusResponse{Alive:[IA]}
end par
note over Caller: IA confirmed alive by quorum > 0.5\\npool = {IA}
else No stable voters (startup)
note over Caller: Phase 1 result kept directly\\n(no indexer reaches MinStableAge)
end
== Replacement pool ==
Caller -> Caller: replaceStaticIndexers(pool, admittedAt)\\nStaticIndexerMeta[IA].AdmittedAt = admittedAt
end
== Step 3: heartbeat to the indexer pool (ConnectToIndexers) ==
Caller -> Caller: SendHeartbeat /opencloud/heartbeat/1.0\\nto StaticIndexers
@enduml

View File

@@ -0,0 +1,38 @@
@startuml
title NATS — CREATE_RESOURCE: Peer A creates/updates Peer B (on-demand connection)
participant "App Peer A (oc-api)" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "Node B" as NodeB
participant "DB Peer A (oc-lib)" as DBA
note over AppA: Peer B is discovered\n(via an indexer or manually)
AppA -> NATSA: Publish(CREATE_RESOURCE, {\n FromApp:"oc-api",\n Datatype:PEER,\n Payload: Peer B {StreamAddress_B, Relation:PARTNER}\n})
NATSA -> NodeA: ListenNATS callback → CREATE_RESOURCE
NodeA -> NodeA: json.Unmarshal(payload) → peer.Peer B
NodeA -> NodeA: if peer == self ? → skip
alt peer B.Relation == PARTNER
NodeA -> StreamA: ToPartnerPublishEvent(ctx, PB_CREATE, PEER, payload)
note over StreamA: No permanent heartbeat.\nOn-demand connection: open a stream,\nsend the event, close it or let it expire.
StreamA -> StreamA: PublishCommon(PEER, user, B.PeerID,\n ProtocolUpdateResource, selfPeerJSON)
StreamA -> NodeB: TempStream /opencloud/resource/update/1.0\n(short TTL, closed after sending)
StreamA -> NodeB: stream.Encode(Event{from, datatype, payload})
NodeB --> StreamA: (application-side processing)
else peer B.Relation != PARTNER (revocation / blacklist)
note over NodeA: Close every existing stream to Peer B
loop For each active stream to PeerID_B
NodeA -> StreamA: streams[proto][PeerID_B].Stream.Close()
NodeA -> StreamA: delete(streams[proto], PeerID_B)
end
end
NodeA -> DBA: (no direct write — only the source app manages the DB)
@enduml

View File

@@ -0,0 +1,73 @@
@startuml
title NATS — PROPALGATION_EVENT: Peer A propagates to Peer B (lookup)
participant "App Peer A" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "Node Partner B" as PeerB
participant "Node C" as PeerC
participant "NATS B" as NATSB
participant "DB Pair B (oc-lib)" as DBB
note over App: only our proper resource (db data) can be propalgate : creator_id==self
AppA -> NATSA: Publish(PROPALGATION_EVENT, {Action, DataType, Payload})
NATSA -> NodeA: ListenNATS callback → PROPALGATION_EVENT
NodeA -> NodeA: propagation from ourselves? → no, continue
NodeA -> NodeA: json.Unmarshal → PropalgationMessage{Action, DataType, Payload}
alt Action == PB_DELETE
NodeA -> StreamA: ToPartnerPublishEvent(PB_DELETE, dt, user, payload)
StreamA -> StreamA: searchPeer(PARTNER) → [Peer Partner B, ...]
StreamA -> NodeB: write(PeerID_B, addr_B, dt, user, payload, ProtocolDeleteResource)
note over NodeB: /opencloud/resource/delete/1.0
NodeB -> NodeB: handleEventFromPartner(evt, ProtocolDeleteResource)
NodeB -> NATSB: SetNATSPub(REMOVE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Delete resource in DB B
else Action == PB_UPDATE (via ProtocolUpdateResource)
NodeA -> StreamA: ToPartnerPublishEvent(PB_UPDATE, dt, user, payload)
StreamA -> StreamA: searchPeer(PARTNER) → [Peer Partner B, ...]
StreamA -> NodeB: write → /opencloud/resource/update/1.0
NodeB -> NATSB: SetNATSPub(CREATE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Upsert resource into DB B
else Action == PB_CREATE (via ProtocolCreateResource)
NodeA -> StreamA: ToPartnerPublishEvent(PB_UPDATE, dt, user, payload)
StreamA -> StreamA: searchPeer(PARTNER) → [Peer Partner B, ...]
StreamA -> NodeB: write → /opencloud/resource/create/1.0
NodeB -> NATSB: SetNATSPub(CREATE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Create resource in DB B
else Action == PB_CONSIDERS (acknowledges a previous action, such as planning or creating a resource)
NodeA -> NodeA: Unmarshal → executionConsidersPayload{PeerIDs:[PeerID_B, ...]}
loop For every peer_id targeted
NodeA -> StreamA: PublishCommon(dt, user, PeerID_B, ProtocolConsidersResource, payload)
StreamA -> NodeB: write → /opencloud/resource/considers/1.0
NodeB -> NodeB: passConsidering(evt)
NodeB -> NATSB: SetNATSPub(PROPALGATION_EVENT, {PB_CONSIDERS, dt, payload})
NATSB -> DBB: (handled by the emitting app of the previous action on NATS B)
end
else Action == PB_CLOSE_PLANNER
NodeA -> NodeA: Unmarshal → {peer_id: PeerID_B}
NodeA -> StreamA: Streams[ProtocolSendPlanner][PeerID_B].Stream.Close()
NodeA -> StreamA: delete(Streams[ProtocolSendPlanner], PeerID_B)
else Action == PB_SEARCH + DataType == PEER
NodeA -> NodeA: read → {search: "..."}
NodeA -> NodeA: GetPeerRecord(ctx, search)
note over NodeA: Resolved via DB A or Indexer + DHT
NodeA -> NATSA: SetNATSPub(SEARCH_EVENT, {PEER, PeerRecord JSON})
NATSA -> NATSA: (AppA retrieves the results)
else Action == PB_SEARCH + other DataType
NodeA -> NodeA: read → {type:"all"|"known"|"partner", search:"..."}
NodeA -> NodeA: PubSubService.SearchPublishEvent(ctx, dt, type, user, search)
note over NodeA: See the pubsub_search & stream_search diagrams
end
@enduml
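The action-to-protocol routing above boils down to a small mapping; the sketch below is illustrative only, with the action names taken from the labels in the diagram:

```go
package main

import "fmt"

// Illustrative mapping of PROPALGATION_EVENT actions to the libp2p protocol IDs
// shown above; PB_CONSIDERS, PB_PLANNER and PB_SEARCH follow dedicated paths.
func protocolForAction(action string) (string, bool) {
	switch action {
	case "PB_CREATE":
		return "/opencloud/resource/create/1.0", true
	case "PB_UPDATE":
		return "/opencloud/resource/update/1.0", true
	case "PB_DELETE":
		return "/opencloud/resource/delete/1.0", true
	default:
		return "", false
	}
}

func main() {
	for _, a := range []string{"PB_DELETE", "PB_UPDATE", "PB_CREATE", "PB_SEARCH"} {
		proto, ok := protocolForAction(a)
		fmt.Println(a, "→", proto, ok)
	}
}
```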

View File

@@ -0,0 +1,58 @@
@startuml
title PubSub — Global gossip search (type "all") : Peer A searching, Peer B answering
participant "App UI A" as UIA
participant "App Peer A" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "PubSubService A" as PubSubA
participant "GossipSub libp2p (mesh)" as GossipSub
participant "Node B" as NodeB
participant "PubSubService B" as PubSubB
participant "DB Peer B (oc-lib)" as DBB
participant "StreamService B" as StreamB
UIA -> AppA: websocket subscription, sending {type:"all", search:"search"} in query
AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_SEARCH, type:"all", search:"search"})
NATSA -> NodeA: ListenNATS → PB_SEARCH (type "all")
NodeA -> PubSubA: SearchPublishEvent(ctx, dt, "all", user, "search")
PubSubA -> PubSubA: publishEvent(PB_SEARCH, user, {search:"search"})
PubSubA -> PubSubA: priv_A.Sign(event body) → sig
PubSubA -> PubSubA: Build Event{Type:"search", From:DID_A, Payload:{search:"search"}, Sig}
PubSubA -> GossipSub: topic.Join("search")
PubSubA -> GossipSub: topic.Publish(ctx, json(Event))
GossipSub --> NodeB: Propagates message (gossip mesh)
NodeB -> PubSubB: subscribeEvents listens to topic "search#"
PubSubB -> PubSubB: read → Event{From: DID_A}
PubSubB -> NodeB: GetPeerRecord(ctx, DID_A)
note over NodeB: Resolve Peer A via DB B or by asking an Indexer
NodeB --> PubSubB: Peer A {PublicKey_A, Relation, ...}
PubSubB -> PubSubB: event.Verify(Peer A) → valid sig_A
PubSubB -> PubSubB: handleEventSearch(ctx, evt, PB_SEARCH)
PubSubB -> StreamB: SendResponse(Peer A, evt)
StreamB -> DBB: Search(COMPUTE + STORAGE + ..., filters{creator=self, access=PUBLIC OR partnerships[PeerID_A]}, search="search")
DBB --> StreamB: [Resource1, Resource2, ...]
loop For every matching resource (only our own resources match: creator_id=self_did)
StreamB -> StreamB: write(PeerID_A, addr_A, dt, resource JSON, ProtocolSearchResource)
StreamB -> StreamA: NewStream /opencloud/resource/search/1.0
StreamB -> StreamA: stream.Encode(Event{Type:search, From:DID_B, DataType, Payload:resource})
end
StreamA -> StreamA: readLoop → handleEvent(ProtocolSearchResource, evt)
StreamA -> StreamA: retrieveResponse(evt)
StreamA -> NATSA: SetNATSPub(SEARCH_EVENT, {DataType, resource JSON})
NATSA -> AppA: Search results from Peer B
AppA -> UIA: emit on websocket
@enduml
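A minimal sketch of the sign-then-verify step, using `crypto/ed25519` purely for illustration; the real code signs with the peer's libp2p private key and resolves the sender's public key from the DB or an indexer:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/json"
	"fmt"
)

// Event mirrors the fields used in the diagram; the field set is an assumption.
type Event struct {
	Type    string            `json:"type"`
	From    string            `json:"from"`
	Payload map[string]string `json:"payload"`
	Sig     []byte            `json:"sig,omitempty"`
}

// body is the canonical signed form: the event with the signature blanked out.
func (e Event) body() []byte {
	e.Sig = nil
	b, _ := json.Marshal(e)
	return b
}

func main() {
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)

	// Peer A builds and signs the search event before topic.Publish.
	evt := Event{Type: "search", From: "did:example:peerA", Payload: map[string]string{"search": "gpu"}}
	evt.Sig = ed25519.Sign(priv, evt.body())

	// Peer B resolves Peer A's public key and verifies before handling the search.
	fmt.Println("signature valid:", ed25519.Verify(pub, evt.body(), evt.Sig))
}
```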

View File

@@ -0,0 +1,54 @@
@startuml
title Stream — Direct search (type "known"/"partner") : Peer A → Peer B
participant "App UI A" as UIA
participant "App Pair A" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "PubSubService A" as PubSubA
participant "StreamService A" as StreamA
participant "DB Pair A (oc-lib)" as DBA
participant "Node B" as NodeB
participant "StreamService B" as StreamB
participant "DB Pair B (oc-lib)" as DBB
UIA -> AppA: websocket subscription, sending {type:"all", search:"search"} in query
AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_SEARCH, type:"partner", search:"gpu"})
NATSA -> NodeA: ListenNATS → PB_SEARCH (type "partner")
NodeA -> PubSubA: SearchPublishEvent(ctx, dt, "partner", user, "gpu")
PubSubA -> StreamA: SearchPartnersPublishEvent(dt, user, "gpu")
StreamA -> DBA: Search(PEER, PARTNER) + PeerIDS config
DBA --> StreamA: [Peer B, ...]
loop For each partner peer (Peer B)
StreamA -> StreamA: write(PeerID_B, addr_B, dt, user, payload, ProtocolSearchResource)
StreamA -> NodeB: TempStream /opencloud/resource/search/1.0
StreamA -> NodeB: stream.Encode(Event{Type:search, From:DID_A, DataType, Payload:{search:"gpu"}})
NodeB -> StreamB: HandleResponse(stream) → readLoop
StreamB -> StreamB: handleEvent(ProtocolSearchResource, evt)
StreamB -> StreamB: handleEventFromPartner(evt, ProtocolSearchResource)
alt evt.DataType == -1 (all resources)
StreamB -> DBB: Search(PEER, evt.From=DID_A)
note over StreamB: Local resolution (DB) or GetPeerRecord (indexer path)
StreamB -> StreamB: SendResponse(Peer A, evt)
StreamB -> DBB: Search(ALL_RESOURCES, filter{creator=B + public OR partner A + search:"gpu"})
DBB --> StreamB: [Resource1, Resource2, ...]
else evt.DataType specified
StreamB -> DBB: Search(DataType, filter{creator=B + access + search:"gpu"})
DBB --> StreamB: [Resource1, ...]
end
loop For each resource
StreamB -> StreamA: write(PeerID_A, addr_A, dt, resource JSON, ProtocolSearchResource)
StreamA -> StreamA: readLoop → handleEvent(ProtocolSearchResource, evt)
StreamA -> StreamA: retrieveResponse(evt)
StreamA -> NATSA: SetNATSPub(SEARCH_EVENT, {DataType, resource JSON})
NATSA -> AppA: Peer B results
AppA -> UIA: emit on websocket
end
end
@enduml

View File

@@ -0,0 +1,60 @@
@startuml
title Stream — Partner heartbeat and CRUD propagation Peer A ↔ Peer B
participant "DB Peer A (oc-lib)" as DBA
participant "StreamService A" as StreamA
participant "Node A" as NodeA
participant "Node B" as NodeB
participant "StreamService B" as StreamB
participant "NATS B" as NATSB
participant "DB Pair B (oc-lib)" as DBB
participant "NATS A" as NATSA
note over StreamA: Démarrage → connectToPartners()
StreamA -> DBA: Search(PEER, PARTNER) + PeerIDS config
DBA --> StreamA: [Peer B, ...]
StreamA -> NodeB: Connect (libp2p)
StreamA -> NodeB: NewStream /opencloud/resource/heartbeat/partner/1.0
StreamA -> NodeB: json.Encode(Heartbeat{Name_A, DID_A, PeerID_A, IndexersBinded_A})
NodeB -> StreamB: HandlePartnerHeartbeat(stream)
StreamB -> StreamB: CheckHeartbeat → bandwidth challenge
StreamB -> StreamA: Echo(payload)
StreamB -> StreamB: streams[ProtocolHeartbeatPartner][PeerID_A] = {DID_A, Expiry=now+10s}
StreamA -> StreamA: streams[ProtocolHeartbeatPartner][PeerID_B] = {DID_B, Expiry=now+10s}
note over StreamA,StreamB: Long-lived partner stream established\nGC every 8s (StreamService A)\nGC every 30s (StreamService B)
note over NATSA: Peer A receives PROPALGATION_EVENT{PB_DELETE, dt:"storage", payload:res}
NATSA -> NodeA: ListenNATS → ToPartnerPublishEvent(PB_DELETE, dt, user, payload)
NodeA -> StreamA: ToPartnerPublishEvent(ctx, PB_DELETE, dt_storage, user, payload)
alt dt == PEER (partner relation update)
StreamA -> StreamA: json.Unmarshal → peer.Peer B updated
alt B.Relation == PARTNER
StreamA -> NodeB: ConnectToPartner(B.StreamAddress)
note over StreamA,NodeB: Heartbeat reconnection if the relation is upgraded
else B.Relation != PARTNER
loop All protocols
StreamA -> StreamA: delete(streams[proto][PeerID_B])
StreamA -> NodeB: (streams closed)
end
end
else dt != PEER (ordinary resource)
StreamA -> DBA: Search(PEER, PARTNER) → [Peer B, ...]
loop For each partner protocol (Create/Update/Delete)
StreamA -> NodeB: write(PeerID_B, addr_B, dt, user, payload, ProtocolDeleteResource)
note over NodeB: /opencloud/resource/delete/1.0
NodeB -> StreamB: HandleResponse → readLoop
StreamB -> StreamB: handleEventFromPartner(evt, ProtocolDeleteResource)
StreamB -> NATSB: SetNATSPub(REMOVE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Delete resource in DB B
end
end
@enduml

View File

@@ -0,0 +1,51 @@
@startuml
title Stream — Planner session: Peer A requests Peer B's plan
participant "App Peer A (oc-booking)" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "Node B" as NodeB
participant "StreamService B" as StreamB
participant "DB Pair B (oc-lib)" as DBB
participant "NATS B" as NATSB
' Ouverture session planner
AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_PLANNER, peer_id:PeerID_B, payload:{}})
NATSA -> NodeA: ListenNATS → PB_PLANNER
NodeA -> NodeA: Unmarshal → {peer_id: PeerID_B, payload: {}}
NodeA -> StreamA: PublishCommon(nil, user, PeerID_B, ProtocolSendPlanner, {})
note over StreamA: WaitResponse=true, TTL=24h\nLong-lived stream toward Peer B
StreamA -> NodeB: TempStream /opencloud/resource/planner/1.0
StreamA -> NodeB: json.Encode(Event{Type:planner, From:DID_A, Payload:{}})
NodeB -> StreamB: HandleResponse → readLoop(ProtocolSendPlanner)
StreamB -> StreamB: handleEvent(ProtocolSendPlanner, evt)
StreamB -> StreamB: sendPlanner(evt)
alt evt.Payload empty (initial request)
StreamB -> DBB: planner.GenerateShallow(AdminRequest)
DBB --> StreamB: plan (Peer B's shallow booking plan)
StreamB -> StreamA: PublishCommon(nil, user, DID_A, ProtocolSendPlanner, planJSON)
StreamA -> NodeA: json.Encode(Event{plan of B})
NodeA -> NATSA: (forwarded to AppA via SEARCH_EVENT or a PLANNER event)
NATSA -> AppA: Peer B's plan
else evt.Payload not empty (planner update)
StreamB -> StreamB: m["peer_id"] = evt.From (DID_A)
StreamB -> NATSB: SetNATSPub(PROPALGATION_EVENT, {PB_PLANNER, peer_id:DID_A, payload:plan})
NATSB -> DBB: (oc-booking processes the plan on NATS B)
end
' Close planner session
AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_CLOSE_PLANNER, peer_id:PeerID_B})
NATSA -> NodeA: ListenNATS → PB_CLOSE_PLANNER
NodeA -> NodeA: Unmarshal → {peer_id: PeerID_B}
NodeA -> StreamA: Mu.Lock()
NodeA -> StreamA: Streams[ProtocolSendPlanner][PeerID_B].Stream.Close()
NodeA -> StreamA: delete(Streams[ProtocolSendPlanner], PeerID_B)
NodeA -> StreamA: Mu.Unlock()
note over StreamA,NodeB: Planner stream closed, session ended
@enduml

View File

@@ -0,0 +1,61 @@
@startuml
title Native Indexer — Background loops (offload, DHT refresh, stream GC)
participant "Indexer A (registered)" as IndexerA
participant "Indexer B (registered)" as IndexerB
participant "Native Indexer" as Native
participant "DHT Kademlia" as DHT
participant "Node A (responsible peer)" as NodeA
note over Native: runOffloadLoop — every 30s
loop Every 30s
Native -> Native: len(responsiblePeers) > 0 ?
note over Native: responsiblePeers = peers for which\nthe native self-delegated (no indexer available)
alt Responsible peers exist (e.g. Node A)
Native -> Native: reachableLiveIndexers()
note over Native: Filters liveIndexers by TTL\npings PeerIsAlive for each candidate
alt Indexers A and B now reachable
Native -> Native: responsiblePeers = {} (releases Node A and the others)
note over Native: Node A will reconnect\non the next ConnectToNatives
else Still no indexer
note over Native: Node A stays under the native's responsibility
end
end
end
note over Native: refreshIndexersFromDHT — every 30s
loop Every 30s
Native -> Native: Collect all knownPeerIDs\n= {PeerID_A, PeerID_B, ...}
loop For each known PeerID
Native -> Native: liveIndexers[PeerID] still fresh?
alt Entry missing or expired
Native -> DHT: SearchValue(ctx 5s, "/indexer/"+PeerID)
DHT --> Native: byte channel
loop For each DHT result
Native -> Native: Unmarshal → liveIndexerEntry
Native -> Native: Keep the best (most recent valid ExpiresAt)
end
Native -> Native: liveIndexers[PeerID] = best entry
note over Native: "native: refreshed indexer from DHT"
end
end
end
note over Native: LongLivedStreamRecordedService GC — every 30s
loop Every 30s
Native -> Native: gc() — lock StreamRecords[Heartbeat]
loop For each StreamRecord (Indexer A, B, ...)
Native -> Native: now > rec.Expiry ?\nOR timeSince(LastSeen) > 2× remaining TTL ?
alt Peer expired (e.g. Indexer B gone)
Native -> Native: Remove Indexer B from ALL protocol maps
note over Native: Heartbeat stream closed\nliveIndexers[PeerID_B] will expire naturally
end
end
end
note over IndexerA: Indexer A keeps heartbeating normally\nand stays in StreamRecords + liveIndexers
@enduml

View File

@@ -0,0 +1,49 @@
@startuml 15_archi_config_nominale
skinparam componentStyle rectangle
skinparam backgroundColor white
skinparam defaultTextAlignment center
title C1 — Nominal topology\n2 natives · 2 indexers · 2 nodes
package "Layer 1 — Native mesh" #E8F4FD {
component "Native A\n(authoritative hub)" as NA #AED6F1
component "Native B\n(authoritative hub)" as NB #AED6F1
NA <--> NB : heartbeat /opencloud/heartbeat/1.0 (20s)\n+ gossip PubSub oc-indexer-registry
}
package "Couche 2 — Indexeurs" #E9F7EF {
component "Indexer A\n(DHT server)" as IA #A9DFBF
component "Indexer B\n(DHT server)" as IB #A9DFBF
}
package "Couche 3 — Nœuds" #FEFBD8 {
component "Node 1" as N1 #FAF0BE
component "Node 2" as N2 #FAF0BE
}
' Registrations (one-shot, 60s)
IA -[#117A65]--> NA : signed subscribe (60s)\n/opencloud/native/subscribe/1.0
IA -[#117A65]--> NB : signed subscribe (60s)
IB -[#117A65]--> NA : signed subscribe (60s)
IB -[#117A65]--> NB : signed subscribe (60s)
' Indexer → native heartbeats (long-lived, 20s)
IA -[#27AE60]..> NA : heartbeat (20s)
IA -[#27AE60]..> NB : heartbeat (20s)
IB -[#27AE60]..> NA : heartbeat (20s)
IB -[#27AE60]..> NB : heartbeat (20s)
' Node → indexer heartbeats (long-lived, 20s)
N1 -[#E67E22]--> IA : heartbeat long-lived (20s)\n/opencloud/heartbeat/1.0
N1 -[#E67E22]--> IB : heartbeat long-lived (20s)
N2 -[#E67E22]--> IA : heartbeat long-lived (20s)
N2 -[#E67E22]--> IB : heartbeat long-lived (20s)
note as Legend
Legend:
──► one-shot registration (signed)
···► long-lived heartbeat (20s)
──► node → indexer heartbeat (20s)
end note
@enduml

View File

@@ -0,0 +1,38 @@
@startuml 16_archi_config_seed
skinparam componentStyle rectangle
skinparam backgroundColor white
skinparam defaultTextAlignment center
title C2 — Seed mode (no native)\nIndexerAddresses only · AdmittedAt = zero
package "Layer 2 — Seed indexers" #E9F7EF {
component "Indexer A\n(seed, AdmittedAt=0)" as IA #A9DFBF
component "Indexer B\n(seed, AdmittedAt=0)" as IB #A9DFBF
}
package "Couche 3 — Nœuds" #FEFBD8 {
component "Node 1" as N1 #FAF0BE
component "Node 2" as N2 #FAF0BE
}
note as NNative #FFDDDD
No native configured.
AdmittedAt = zero → IsStableVoter() = false
Phase 2 without voters: Phase 1 kept directly.
Risk D20: trust circularity (seeds validate each other).
end note
' Node → seed indexer heartbeats
N1 -[#E67E22]--> IA : heartbeat long-lived (20s)
N1 -[#E67E22]--> IB : heartbeat long-lived (20s)
N2 -[#E67E22]--> IA : heartbeat long-lived (20s)
N2 -[#E67E22]--> IB : heartbeat long-lived (20s)
note bottom of IA
After 2s: async goroutine
fetchNativeFromIndexers → ?
If a native is found → ConnectToNatives (upgrade to C1)
If not → pure indexer mode (D20 active)
end note
@enduml

View File

@@ -0,0 +1,63 @@
@startuml 17_startup_consensus_phase1_phase2
title Startup with natives — Phase 1 (admission) + Phase 2 (liveness)
participant "Node / Indexer\n(caller)" as Caller
participant "Native A" as NA
participant "Native B" as NB
participant "Indexer A" as IA
participant "Indexer B" as IB
note over Caller: ConnectToNatives()\nNativeIndexerAddresses configured
== Step 0: heartbeat to the native mesh ==
Caller -> NA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
Caller -> NB: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
== Step 1: fetch pool in parallel ==
par Parallel fetch (6s timeout)
Caller -> NA: GET /opencloud/native/indexers/1.0\nGetIndexersRequest{Count: max, FillRates requested}
NA -> NA: reachableLiveIndexers()\nsorted by w(F) = fillRate×(1-fillRate)
NA --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3, IB:0.6}}
else
Caller -> NB: GET /opencloud/native/indexers/1.0
NB -> NB: reachableLiveIndexers()
NB --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3, IB:0.6}}
end par
note over Caller: Merge + dedup → candidates = [IA, IB]\nisFallback = false
== Step 2a: Phase 1 — Native admission (clientSideConsensus) ==
par Parallel consensus (3s timeout per native, 4s total)
Caller -> NA: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IB]}
NA -> NA: cross-check with liveIndexers
NA --> Caller: ConsensusResponse{Trusted:[IA,IB], Suggestions:[]}
else
Caller -> NB: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IB]}
NB -> NB: cross-check with liveIndexers
NB --> Caller: ConsensusResponse{Trusted:[IA], Suggestions:[IC]}
end par
note over Caller: IA → 2/2 votes → confirmed ✓\nIB → 1/2 votes → rejected ✗\nadmittedAt = time.Now()
== Step 2b: Phase 2 — Liveness vote (indexerLivenessVote) ==
note over Caller: Looks for stable voters in StaticIndexerMeta\n(AdmittedAt != zero, age >= MinStableAge=2min)
alt Stable voters available
par Phase 2 in parallel (3s timeout)
Caller -> IA: /opencloud/indexer/consensus/1.0\nIndexerConsensusRequest{Candidates:[IA]}
IA -> IA: check StreamRecords[ProtocolHB][candidate]\nLastSeen ≤ 2×60s && LastScore ≥ 30
IA --> Caller: IndexerConsensusResponse{Alive:[IA]}
end par
note over Caller: IA confirmed alive by quorum > 0.5
else No stable voter (first startup)
note over Caller: Phase 1 kept directly\n(no voter has reached MinStableAge)
end
== Step 3: replace StaticIndexers ==
Caller -> Caller: replaceStaticIndexers(pool={IA}, admittedAt)\nStaticIndexerMeta[IA].AdmittedAt = time.Now()
== Step 4: long-lived heartbeat to the pool ==
Caller -> IA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
note over Caller: Pool active. NudgeIndexerHeartbeat()
@enduml
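A compact sketch of the two admission rules used above; the thresholds (majority of alive voters, `LastSeen <= 2×interval`, `LastScore >= 30`) come from the diagram, everything else is illustrative:

```go
package main

import (
	"fmt"
	"time"
)

type record struct {
	LastSeen  time.Time
	LastScore float64
}

// phase1Admit keeps a candidate when strictly more than half of the alive natives trust it.
func phase1Admit(votes, aliveNatives int) bool {
	return aliveNatives > 0 && float64(votes)/float64(aliveNatives) > 0.5
}

// phase2Alive is one stable indexer's liveness vote, based on its own heartbeat record.
func phase2Alive(rec record, interval time.Duration, now time.Time) bool {
	return now.Sub(rec.LastSeen) <= 2*interval && rec.LastScore >= 30.0
}

func main() {
	now := time.Now()
	fmt.Println(phase1Admit(2, 2)) // IA: 2/2 votes → admitted
	fmt.Println(phase1Admit(1, 2)) // IB: 1/2 votes → rejected
	fmt.Println(phase2Alive(record{LastSeen: now.Add(-90 * time.Second), LastScore: 42}, time.Minute, now)) // alive
}
```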

View File

@@ -0,0 +1,51 @@
@startuml 18_startup_seed_discovers_native
title C2 → C1 — Seed discovers a native (async upgrade)
participant "Node / Indexer\\n(seed mode)" as Caller
participant "Indexer A\\n(seed)" as IA
participant "Indexer B\\n(seed)" as IB
participant "Native A\\n(discovered)" as NA
note over Caller: Startup without NativeIndexerAddresses\\nStaticIndexers = [IA, IB] (AdmittedAt=0)
== Initial seed phase ==
Caller -> IA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
Caller -> IB: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
note over Caller: Pool active in seed mode.\\nIsStableVoter() = false (AdmittedAt=0)\\nPhase 2 without voters → Phase 1 kept.
== Async goroutine after 2s ==
note over Caller: time.Sleep(2s)\\nfetchNativeFromIndexers()
Caller -> IA: GET /opencloud/indexer/natives/1.0
IA --> Caller: GetNativesResponse{Natives:[NA]}
note over Caller: Native discovered: NA\\nCall ConnectToNatives([NA])
== Upgrade to nominal mode (ConnectToNatives) ==
Caller -> NA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
par Fetch pool from the native (6s timeout)
Caller -> NA: GET /opencloud/native/indexers/1.0\\nGetIndexersRequest{Count: max}
NA -> NA: reachableLiveIndexers()\\nsorted by w(F) = fillRate×(1-fillRate)
NA --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.4, IB:0.6}}
end par
note over Caller: candidates = [IA, IB], isFallback = false
par Phase 1 consensus (3s timeout)
Caller -> NA: /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IB]}
NA -> NA: cross-check with liveIndexers
NA --> Caller: ConsensusResponse{Trusted:[IA,IB], Suggestions:[]}
end par
note over Caller: IA ✓ IB ✓ (1/1 vote)\\nadmittedAt = time.Now()
note over Caller: No stable voter (AdmittedAt was just set)\\nPhase 2 skipped → Phase 1 kept directly
== Pool replacement ==
Caller -> Caller: replaceStaticIndexers(pool={IA,IB}, admittedAt)\\nStaticIndexerMeta[IA].AdmittedAt = time.Now()\\nStaticIndexerMeta[IB].AdmittedAt = time.Now()
note over Caller: Pool upgraded in the shared StaticIndexers map.\\nThe existing heartbeat goroutine (started in seed mode)\\npicks up the new members on the next tick (20s).\\nNo new goroutine created.\\nIsStableVoter() becomes true after MinStableAge (2min).\\nD20 (seed circularity) eliminated.
@enduml

View File

@@ -0,0 +1,55 @@
@startuml failure_indexer_crash
title Indexer Failure → replenish from a Native
participant "Node" as N
participant "Indexer A (alive)" as IA
participant "Indexer B (crashed)" as IB
participant "Native A" as NA
participant "Native B" as NB
note over N: Active pool: Indexers = [IA, IB]\\nActive long-lived heartbeats to IA & IB
== IB Failure ==
IB ->x N: heartbeat fails (sendHeartbeat err)
note over N: doTick() in SendHeartbeat triggers the failure\\n→ delete(Indexers[IB])\\n→ delete(IndexerMeta[IB])\\nThe single heartbeat goroutine keeps running
N -> N: go replenishIndexersFromNative(need=1)
note over N: Pool reduced to 1 indexer.\\nReplenish triggered in a goroutine.
== Replenish from natives ==
par Fetch pool (timeout 6s)
N -> NA: GET /opencloud/native/indexers/1.0\\nGetIndexersRequest{Count: max}
NA -> NA: reachableLiveIndexers()\\n(IB absent because of an expired heartbeat)
NA --> N: GetIndexersResponse{Indexers:[IA,IC], FillRates:{IA:0.4,IC:0.2}}
else
N -> NB: GET /opencloud/native/indexers/1.0
NB --> N: GetIndexersResponse{Indexers:[IA,IC]}
end par
note over N: Merge + dedup → candidates = [IA, IC]\\n(IA already in pool → IC is the new candidate)
par Consensus Phase 1 (timeout 4s)
N -> NA: /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IC]}
NA --> N: ConsensusResponse{Trusted:[IA,IC]}
else
N -> NB: /opencloud/native/consensus/1.0
NB --> N: ConsensusResponse{Trusted:[IA,IC]}
end par
note over N: IC → 2/2 votes → admitted\\nadmittedAt = time.Now()
par Phase 2 — liveness vote (if stable voters exist)
N -> IA: /opencloud/indexer/consensus/1.0\\nIndexerConsensusRequest{Candidates:[IC]}
IA -> IA: StreamRecords[ProtocolHB][IC]\\nLastSeen ≤ 120s && LastScore ≥ 30
IA --> N: IndexerConsensusResponse{Alive:[IC]}
end par
note over N: IC confirmed alive → add to pool
N -> N: replaceStaticIndexers(pool={IA,IC})
N -> IC: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
note over N: Pool restored to 2 indexers.
@enduml

View File

@@ -0,0 +1,51 @@
@startuml failure_indexers_native_falback
title Indexer failures → native IsSelfFallback
participant "Node" as N
participant "Indexer A (crashed)" as IA
participant "Indexer B (crashed)" as IB
participant "Native A" as NA
participant "Native B" as NB
note over N: Active Pool : Indexers = [IA, IB]
== Successive Failures on IA & IB ==
IA ->x N: heartbeat failure (sendHeartbeat err)
IB ->x N: heartbeat failure (sendHeartbeat err)
note over N: doTick() in SendHeartbeat triggers the failures\\n→ delete(StaticIndexers[IA]), delete(StaticIndexers[IB])\\n→ delete(StaticIndexerMeta[IA/IB])\\nThe single heartbeat goroutine keeps running.
N -> N: go replenishIndexersFromNative(need=2)
== Replenish attempt — natives switch to self-fallback mode ==
par Fetch from natives (timeout 6s)
N -> NA: GET /opencloud/native/indexers/1.0
NA -> NA: reachableLiveIndexers() → 0 alive indexers\\nFallback: includes itself (IsSelfFallback=true)
NA --> N: GetIndexersResponse{Indexers:[NA_addr], IsSelfFallback:true}
else
N -> NB: GET /opencloud/native/indexers/1.0
NB --> N: GetIndexersResponse{Indexers:[NB_addr], IsSelfFallback:true}
end par
note over N: isFallback=true → resolvePool skips consensus\\nadmittedAt = time.Time{} (zero)\\nStaticIndexers = {NA_addr} (native as fallback)
N -> NA: SendHeartbeat /opencloud/heartbeat/1.0\\n(native used as a temporary fallback indexer)
note over NA: responsiblePeers[N] registered.\\nrunOffloadLoop watches for real indexers.
== IA comes back → runOffloadLoop on the native side ==
IA -> NA: /opencloud/native/subscribe/1.0\\nIndexerRegistration{FillRate: 0}
note over NA: liveIndexers[IA] updated.\\nrunOffloadLoop detects a real available indexer\\nand migrates N from the native to IA.
== Replenish on next heartbeat tick ==
N -> NA: GET /opencloud/native/indexers/1.0
NA --> N: GetIndexersResponse{Indexers:[IA], IsSelfFallback:false}
note over N: isFallback=false → Classic Phase 1 + Phase 2
N -> N: replaceStaticIndexers(pool={IA}, admittedAt)
N -> IA: SendHeartbeat /opencloud/heartbeat/1.0
note over N: Pool restored. The native withdraws from its fallback indexer role.
@enduml

View File

@@ -0,0 +1,46 @@
@startuml failure_native_one_down
title Native failure, with one still alive
participant "Indexer A" as IA
participant "Indexer B" as IB
participant "Native A (crashed)" as NA
participant "Native B (alive)" as NB
participant "Node" as N
note over IA, NB: Native State : IA, IB heartbeats to NA & NB
== Native A Failure ==
NA ->x IA: stream reset
NA ->x IB: stream reset
NA ->x N: stream reset (heartbeat Node → NA)
== Indexers side : replenishNativesFromPeers ==
note over IA: SendHeartbeat(NA) detects the reset\\nAfterDelete(NA)\\nStaticNatives = [NB] (still 1)
IA -> IA: replenishNativesFromPeers()\\nphase 1 : fetchNativeFromNatives
IA -> NB: GET /opencloud/native/peers/1.0
NB --> IA: GetPeersResponse{Peers:[NC]} /' new native if one known '/
alt NC available
IA -> NC: SendHeartbeat /opencloud/heartbeat/1.0\\nSubscribe /opencloud/native/subscribe/1.0
note over IA: StaticNatives = [NB, NC]\\nNative Pool restored.
else No native peer
IA -> IA: fetchNativeFromIndexers()\\nAsk every indexer for its natives
IB --> IA: GetNativesResponse{Natives:[]} /' IB also only knows NB '/
note over IA: Impossible to find a 2nd native.\\nStaticNatives = [NB] (degraded but alive).
end
== Node side : alive indexers pool ==
note over N: Node heartbeats to IA & IB.\\nNA's failure does not affect the indexer pool.\\nFuture consensus will use NB only (1/1 vote = quorum OK).
N -> NB: /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IB]}
NB --> N: ConsensusResponse{Trusted:[IA,IB]}
note over N: Consensus 1/1 alive native → admitted.\\nThe consensus floor auto-downgrades (majority of alive natives).
== NB side : heartbeat to NA fails ==
note over NB: EnsureNativePeers / SendHeartbeat to NA\\nfails (sendHeartbeat err)\\n→ delete(StaticNatives[NA])\\nreplenishNativesFromPeers(NA) triggers
note over NB: Native mesh downgraded to NB alone.\\nDegraded but functional.
@enduml

View File

@@ -0,0 +1,60 @@
@startuml 22_failure_both_natives
title F4 — Both natives down → pre-validated fallback pool
participant "Node" as N
participant "Indexer A\\n(alive)" as IA
participant "Indexer B\\n(alive)" as IB
participant "Native A\\n(crashed)" as NA
participant "Native B\\n(crashed)" as NB
note over N: Active pool: StaticIndexers = [IA, IB]\\nStaticNatives = [NA, NB]\\nAdmittedAt[IA] and AdmittedAt[IB] set (stable)
== Simultaneous failure of NA and NB ==
NA ->x N: stream reset
NB ->x N: stream reset
N -> N: AfterDelete(NA) + AfterDelete(NB)\\nStaticNatives = {} (empty)
== replenishNativesFromPeers (no result) ==
N -> N: fetchNativeFromNatives() → no alive native
N -> IA: GET /opencloud/indexer/natives/1.0
IA --> N: GetNativesResponse{Natives:[NA,NB]}
note over N: NA and NB known but unreachable.\\nNo new native found.
== Fallback: indexer pool kept ==
note over N: isFallback = true\\nStaticIndexers kept as is [IA, IB]\\n(last validated pool with AdmittedAt != zero)\\nRisk D19 mitigated: native quorum = 0 → fallback accepted
note over N: IA and IB heartbeats keep running normally.\\nIndexer pool operational without natives.
N -> IA: SendHeartbeat /opencloud/heartbeat/1.0 (continue)
N -> IB: SendHeartbeat /opencloud/heartbeat/1.0 (continue)
== retryLostNative (30s ticker) ==
loop every 30s
N -> N: retryLostNative()\\ntries to reconnect NA and NB
N -> NA: dial (failure)
N -> NB: dial (failure)
note over N: Retry with no result.\\nIndexer pool kept in fallback.
end
== Natives come back ==
NA -> NA: restart
NB -> NB: restart
N -> NA: dial (success)
N -> NA: SendHeartbeat /opencloud/heartbeat/1.0
N -> NB: SendHeartbeat /opencloud/heartbeat/1.0
note over N: StaticNatives = [NA, NB] restored\\nisFallback = false
== Indexer pool re-consensus (optional) ==
par Consensus Phase 1
N -> NA: /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IB]}
NA --> N: ConsensusResponse{Trusted:[IA,IB]}
else
N -> NB: /opencloud/native/consensus/1.0
NB --> N: ConsensusResponse{Trusted:[IA,IB]}
end par
note over N: Pool [IA,IB] reconfirmed.\\nisFallback = false. AdmittedAt[IA,IB] refreshed.
@enduml

View File

@@ -0,0 +1,63 @@
@startuml 23_failure_native_plus_indexer
title F5 — Combined failure: 1 native + 1 indexer
participant "Node" as N
participant "Indexer A\\n(alive)" as IA
participant "Indexer B\\n(crashed)" as IB
participant "Native A\\n(alive)" as NA
participant "Native B\\n(crashed)" as NB
note over N: Nominal pool: StaticIndexers=[IA,IB], StaticNatives=[NA,NB]
== Simultaneous failures of NB + IB ==
NB ->x N: stream reset
IB ->x N: stream reset
N -> N: AfterDelete(NB) — StaticNatives = [NA]
N -> N: AfterDelete(IB) — StaticIndexers = [IA]
== Native replenish (1 alive) ==
N -> N: replenishNativesFromPeers()
N -> NA: GET /opencloud/native/peers/1.0
NA --> N: GetPeersResponse{Peers:[]} /' NB was the only peer, now gone '/
note over N: No alternative native.\\nStaticNatives = [NA], degraded.
== Indexer replenish from NA ==
par Fetch pool (timeout 6s)
N -> NA: GET /opencloud/native/indexers/1.0
NA -> NA: reachableLiveIndexers()\\n(IB absent — heartbeat expired)
NA --> N: GetIndexersResponse{Indexers:[IA,IC], FillRates:{IA:0.5,IC:0.3}}
end par
note over N: candidates = [IA, IC]
par Phase 1 consensus — only 1 alive native (3s timeout)
N -> NA: /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IC]}
NA --> N: ConsensusResponse{Trusted:[IA,IC]}
end par
note over N: IC → 1/1 vote → admitted (quorum over alive natives)\\nadmittedAt = time.Now()
par Phase 2 liveness vote
N -> IA: /opencloud/indexer/consensus/1.0\\nIndexerConsensusRequest{Candidates:[IC]}
IA -> IA: StreamRecords[ProtocolHB][IC]\\nLastSeen ≤ 120s && LastScore ≥ 30
IA --> N: IndexerConsensusResponse{Alive:[IC]}
end par
N -> N: replaceStaticIndexers(pool={IA,IC})
N -> IC: SendHeartbeat /opencloud/heartbeat/1.0
note over N: Pool restored to [IA,IC].\\nDegraded mode: only 1 native.\\nretryLostNative(NB) active (30s ticker).
== retryLostNative for NB ==
loop every 30s
N -> NB: dial (failure)
end
NB -> NB: restart
NB -> NA: heartbeat (native mesh rebuilt)
N -> NB: dial (success)
N -> NB: SendHeartbeat /opencloud/heartbeat/1.0
note over N: StaticNatives = [NA,NB] restored.\\nNominal mode recovered.
@enduml

View File

@@ -0,0 +1,45 @@
@startuml 24_failure_retry_lost_native
title F6 — retryLostNative: reconnecting a native after a network outage
participant "Node / Indexer" as Caller
participant "Native A\\n(alive)" as NA
participant "Native B\\n(unstable network)" as NB
note over Caller: StaticNatives = [NA, NB]\\nActive heartbeats to NA and NB
== Transient network failure toward NB ==
NB ->x Caller: stream reset (network timeout)
Caller -> Caller: AfterDelete(NB)\\nStaticNatives = [NA]\\nlostNatives.Store(NB.addr)
== replenishNativesFromPeers — phase 1 ==
Caller -> NA: GET /opencloud/native/peers/1.0
NA --> Caller: GetPeersResponse{Peers:[NB]}
note over Caller: NB known by NA, direct reconnection attempt
Caller -> NB: dial (failure; network still down)
note over Caller: Connection impossible.\\nHands over to retryLostNative()
== retryLostNative : ticker 30s ==
loop every 30s while NB is missing
Caller -> Caller: retryLostNative()\\nIterates over lostNatives
Caller -> NB: StartNativeRegistration (dial + heartbeat + subscribe)
NB --> Caller: dial fails
note over Caller: Retry logged. Next attempt in 30s.
end
== Network restored ==
note over NB: Network restored\\nNB reachable again
Caller -> NB: StartNativeRegistration\\ndial (success)
Caller -> NB: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
Caller -> NB: /opencloud/native/subscribe/1.0\\nIndexerRegistration{FillRate: fillRateFn()}
NB --> Caller: subscribe ack
Caller -> Caller: lostNatives.Delete(NB.addr)\\nStaticNatives = [NA, NB] restored
note over Caller: Nominal mode recovered.\\nnativeHeartbeatOnce not used (goroutine already active for NA).\\nA new SendHeartbeat goroutine is started for NB only.
@enduml

View File

@@ -0,0 +1,35 @@
@startuml 25_failure_node_gc
title F7 — Node crash → indexer GC + AfterDelete
participant "Node\n(crashed)" as N
participant "Indexer A" as IA
participant "Indexer B" as IB
note over N, IB: Nominal state: N was heartbeating to IA and IB
== Crash Node ==
N ->x IA: stream reset (heartbeat cut)
N ->x IB: stream reset (heartbeat cut)
== GC on the Indexer A side ==
note over IA: HandleHeartbeat: stream reset detected\nStreamRecords[ProtocolHeartbeat][N].Expiry frozen
loop ticker GC (30s) — StartGC(30*time.Second)
IA -> IA: gc()\nnow.After(Expiry) where Expiry = lastHBTime + 2min\n→ if 2min without heartbeat → eviction
IA -> IA: delete(StreamRecords[ProtocolHeartbeat][N])\nAfterDelete(N, name, did) called outside the lock
note over IA: N removed from the live registry.\nFillRate recomputed: (n-1) / MaxNodesConn()
end
== Fill-rate impact ==
note over IA: FillRate decreases.\nThe next BuildHeartbeatResponse\nwill include the updated FillRate.\nIf fillRate drops back below 80%:\n→ offload.inBatch and alreadyTried are reset.
== GC on the Indexer B side ==
note over IB: Same GC performed.\nN removed from StreamRecords[ProtocolHeartbeat].
== Possible node reconnection ==
N -> N: restart
N -> IA: SendHeartbeat /opencloud/heartbeat/1.0\nHeartbeat{name, PeerID_N, IndexersBinded, need, record}
IA -> IA: HandleHeartbeat → UptimeTracker(FirstSeen=now)\nStreamRecords[ProtocolHeartbeat][N] recreated\nRepublish PeerRecord N into the DHT
note over IA: N is back with a fresh FirstSeen.\ndynamicMinScore stays high while age < 24h.\n(grace period: 2 ticks before scoring)
@enduml
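An illustrative reduction of the GC pass described above; `streamRecord` and `registry` are stand-ins rather than the real types, and in the actual service this runs on the 30s ticker started by `StartGC`:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type streamRecord struct {
	Name   string
	Expiry time.Time // last heartbeat + 2min
}

type registry struct {
	mu      sync.Mutex
	records map[string]streamRecord
}

// gc evicts expired records under the lock and runs the AfterDelete-style
// callback outside of it, as the diagram requires.
func (r *registry) gc(now time.Time, afterDelete func(peerID, name string)) {
	type evicted struct{ id, name string }
	r.mu.Lock()
	var out []evicted
	for id, rec := range r.records {
		if now.After(rec.Expiry) {
			delete(r.records, id)
			out = append(out, evicted{id, rec.Name})
		}
	}
	r.mu.Unlock()
	for _, e := range out {
		afterDelete(e.id, e.name) // e.g. recompute the fill rate
	}
}

func main() {
	lastHB := time.Now().Add(-3 * time.Minute) // last heartbeat 3 minutes ago
	reg := &registry{records: map[string]streamRecord{
		"peerN": {Name: "node-n", Expiry: lastHB.Add(2 * time.Minute)},
	}}
	reg.gc(time.Now(), func(id, name string) { fmt.Println("evicted", id, name) })
}
```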

88
docs/diagrams/README.md Normal file
View File

@@ -0,0 +1,88 @@
# OC-Discovery — Architecture and sequence diagrams
All files are in [PlantUML](https://plantuml.com/) format.
They can be rendered with VS Code (PlantUML extension), IntelliJ, or [plantuml.com/plantuml](https://www.plantuml.com/plantuml/uml/).
> **Note:** Diagrams 06, 07, 12, 14–24 and several protocols below
> relate to the 3-tier architecture (node → indexer → native indexer),
> removed in the `feature/no_native_consortium` branch. Those files are
> kept for historical reference. The active diagrams are listed below.
## Active diagrams (2-tier architecture)
### Main sequences
| File | Description |
|---------|-------------|
| `01_node_init.puml` | Node initialization: libp2p host + PSK + ConnectionGater + ConnectToIndexers + SendHeartbeat + proactive DHT |
| `02_node_claim.puml` | Node registration: `claimInfo` + `publishPeerRecord` → indexers → DHT |
| `03_indexer_heartbeat.puml` | Bidirectional heartbeat protocol: PeerID + DHT + witness challenges, 7-dimension scoring, suggestions, SuggestMigrate |
| `04_indexer_publish.puml` | Publishing a `PeerRecord` to the indexer → DHT (PutValue /node, /name, /pid) |
| `05_indexer_get.puml` | Resolving a peer: `GetPeerRecord` → indexer → DHT if absent locally |
| `08_nats_create_resource.puml` | NATS `CREATE_RESOURCE` handler: on-demand partner propagation |
| `09_nats_propagation.puml` | NATS `PROPALGATION_EVENT` handler: delete, considers, planner, search |
| `10_pubsub_search.puml` | Global gossip search (GossipSub /opencloud/search/1.0) |
| `11_stream_search.puml` | Direct stream search (type `"known"` or `"partner"`) |
| `13_planner_flow.puml` | Planner session (open, exchange, close) |
### Resilience and pool management
| File | Description |
|---------|-------------|
| `hb_failure_evict.puml` | HeartbeatFailure → evictPeer → TriggerConsensus or DHT replenish |
| `hb_last_indexer.puml` | Last-indexer protection → reconnectToSeeds → retryUntilSeedResponds |
| `dht_discovery.puml` | Proactive DHT discovery: Provide/FindProviders, SelectByFillRate, dhtCache |
| `connection_gater.puml` | ConnectionGater: DB blacklist → sequential DHT check (transport-error fallthrough) |
## Historical diagrams (3-tier architecture — obsolete)
These files document the old architecture. They no longer match
the production code.
| File | Description |
|---------|-------------|
| `06_native_registration.puml` | Indexer registration with a Native (removed) |
| `07_native_get_consensus.puml` | `ConnectToNatives`: fetch pool + Phase 1 + Phase 2 (removed) |
| `12_partner_heartbeat.puml` | Permanent partner heartbeat (removed — on-demand connections) |
| `14_native_offload_gc.puml` | Native Indexer background loops (removed) |
| `15_archi_config_nominale.puml` | Nominal topology with natives (obsolete) |
| `16_archi_config_seed.puml` | Seed mode without natives (obsolete) |
| `17_startup_consensus_phase1_phase2.puml` | Startup with native consensus (removed) |
| `18_startup_seed_discovers_native.puml` | Seed → native upgrade (removed) |
| `19_failure_indexer_crash.puml` | F1 — replenish from a native (removed) |
| `20_failure_both_indexers_selfdelegate.puml` | F2 — native IsSelfFallback (removed) |
| `21_failure_native_one_down.puml` | F3 — one native down (removed) |
| `22_failure_both_natives.puml` | F4 — both natives down (removed) |
| `23_failure_native_plus_indexer.puml` | F5 — combined native + indexer failure (removed) |
| `24_failure_retry_lost_native.puml` | F6 — retryLostNative (removed) |
| `25_failure_node_gc.puml` | F7 — node GC on the indexer side (still valid) |
## Active libp2p protocols
| Protocol | Description |
|-----------|-------------|
| `/opencloud/heartbeat/1.0` | Bidirectional node→indexer heartbeat (long-lived) |
| `/opencloud/probe/1.0` | Bandwidth probe (echo, measures latency + throughput) |
| `/opencloud/witness/1.0` | Witness request: "what is your score for indexer X?" |
| `/opencloud/record/publish/1.0` | `PeerRecord` publication to an indexer |
| `/opencloud/record/get/1.0` | `GetPeerRecord` request to an indexer |
| `/opencloud/resource/search/1.0` | Resource search between peers |
| `/opencloud/resource/create/1.0` | Resource creation propagation → partner |
| `/opencloud/resource/update/1.0` | Resource update propagation → partner |
| `/opencloud/resource/delete/1.0` | Resource deletion propagation → partner |
| `/opencloud/resource/planner/1.0` | Planner session (booking) |
| `/opencloud/resource/verify/1.0` | Resource signature verification |
| `/opencloud/resource/considers/1.0` | Forwarding an execution "considers" |
## Removed protocols (native architecture)
| Protocol | Reason |
|-----------|--------|
| `/opencloud/native/subscribe/1.0` | Native tier removed |
| `/opencloud/native/unsubscribe/1.0` | Native tier removed |
| `/opencloud/native/indexers/1.0` | Replaced by DHT FindProviders |
| `/opencloud/native/consensus/1.0` | Replaced by the lightweight TriggerConsensus |
| `/opencloud/native/peers/1.0` | Native tier removed |
| `/opencloud/indexer/natives/1.0` | Native tier removed |
| `/opencloud/indexer/consensus/1.0` | Replaced by TriggerConsensus |
| `/opencloud/resource/heartbeat/partner/1.0` | Partner heartbeat removed, now on-demand |
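For orientation, the active protocol IDs above could be declared as plain Go constants; this is only a sketch, and the project may declare them differently (for example as libp2p `protocol.ID` values):

```go
package main

import "fmt"

const (
	ProtocolHeartbeat     = "/opencloud/heartbeat/1.0"
	ProtocolProbe         = "/opencloud/probe/1.0"
	ProtocolWitness       = "/opencloud/witness/1.0"
	ProtocolRecordPublish = "/opencloud/record/publish/1.0"
	ProtocolRecordGet     = "/opencloud/record/get/1.0"
	ProtocolSearch        = "/opencloud/resource/search/1.0"
)

func main() {
	fmt.Println(ProtocolHeartbeat, ProtocolRecordGet, ProtocolSearch)
}
```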

View File

@@ -0,0 +1,69 @@
@startuml connection_gater
title ConnectionGater — Admission check (InterceptSecured)
participant "Remote Peer\n(inbound)" as Remote
participant "libp2p\nhost A" as Host
participant "OCConnectionGater" as Gater
participant "DB (oc-lib)" as DB
participant "Indexer X\n(reachable)" as IX
participant "Indexer Y\n(unreachable)" as IY
Remote -> Host: inbound connection (post-PSK, post-TLS)
Host -> Gater: InterceptSecured(dir=Inbound, id=RemotePeerID, conn)
alt dir == Outbound
Gater --> Host: true (outbound is always allowed)
end
== Step 1: Database check ==
Gater -> DB: NewRequestAdmin(PEER).Search(\n Filter: peer_id = RemotePeerID\n)
DB --> Gater: []peer.Peer
alt found AND relation == BLACKLIST
Gater --> Host: false (rejected — blacklisted)
Host ->x Remote: connection closed
end
alt found AND relation != BLACKLIST
Gater --> Host: true (known and not blacklisted)
end
== Step 2: DHT check (peer unknown in DB) ==
note over Gater: Unknown peer → check that it exists\nin the DHT network
Gater -> Gater: getReq = GetValue{PeerID: RemotePeerID}
loop For each indexer (random order — Shuffle)
alt Indexer IY unreachable (transport error)
Gater -> IY: h.Connect(ctxTTL, IY_AddrInfo)
IY -->x Gater: connection failed
note over Gater: reachable=false\n→ try the next one
end
alt Indexer IX reachable
Gater -> IX: h.Connect(ctxTTL, IX_AddrInfo)
IX --> Gater: OK
Gater -> IX: TempStream /opencloud/record/get/1.0
Gater -> IX: stream.Encode(GetValue{PeerID: RemotePeerID})
IX -> IX: Local lookup + DHT if absent
IX --> Gater: GetResponse{Found: true/false, Records}
note over Gater: reachable=true → authoritative answer\n(distributed DHT: a single indexer is enough)
alt Found == true
Gater --> Host: true (peer known to the network)
else Found == false
Gater --> Host: false (rejected — unknown to the network)
Host ->x Remote: connection closed
end
end
end
alt No reachable indexer
note over Gater: Nascent network or all indexers isolated.\nAllow by default.
Gater --> Host: true
end
@enduml
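A condensed sketch of the gater decision; the two lookup callbacks stand in for the DB query and the `/opencloud/record/get/1.0` round trip, and are not the real interfaces:

```go
package main

import (
	"fmt"
	"math/rand"
)

// admit mirrors the flow above: DB first, then the first reachable indexer is
// authoritative, and the connection is allowed by default when none is reachable.
func admit(peerID string, indexers []string,
	db func(peerID string) (found, blacklisted bool),
	ask func(indexer, peerID string) (reachable, known bool)) bool {

	if found, blacklisted := db(peerID); found {
		return !blacklisted
	}
	rand.Shuffle(len(indexers), func(i, j int) { indexers[i], indexers[j] = indexers[j], indexers[i] })
	for _, ix := range indexers {
		reachable, known := ask(ix, peerID)
		if !reachable {
			continue // transport error: try the next indexer
		}
		return known // one reachable indexer is authoritative (distributed DHT)
	}
	return true // nascent network: allow by default
}

func main() {
	ok := admit("12D3KooWX", []string{"indexerA", "indexerB"},
		func(string) (found, blacklisted bool) { return false, false },
		func(ix, _ string) (reachable, known bool) { return ix == "indexerA", true })
	fmt.Println("admitted:", ok)
}
```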

View File

@@ -0,0 +1,56 @@
@startuml dht_discovery
title DHT discovery: Provide/FindProviders + SelectByFillRate + indexer dhtCache
participant "Indexer A\n(new)" as IA
participant "DHT Network" as DHT
participant "Node B\n(bootstrap)" as NodeB
participant "Indexer A\n(existing)" as IAexist
== Indexer registration in the DHT ==
note over IA: IndexerService startup\nstartDHTProvide(fillRateFn)
IA -> IA: Waits for a routable address (max 60s)\nnon-loopback available
IA -> DHT: DHT.Bootstrap(ctx)\n→ routing table warmup
loop ticker RecommendedHeartbeatInterval (~20s)
IA -> DHT: DHT.Provide(IndexerCID, true)\n← IndexerCID = CID(sha256("/opencloud/indexers"))
note over DHT: The indexer is announced as a provider.\nTTL handled by libp2p-kad-dht.\nAuto-expires if Provide() stops.
end
== Passive DHT cache on the indexer ==
note over IA: startDHTCacheRefresh()\nbackground goroutine
IA -> IA: Initial delay 30s (routing table warmup)
loop ticker 2min
IA -> DHT: DiscoverIndexersFromDHT(h, dht, 30)\n← FindProviders(IndexerCID, max=30)
DHT --> IA: []AddrInfo (up to 30 candidates)
IA -> IA: Filter out self\nSelectByFillRate(filtered, nil, 10)\n→ /24 diversity, prior f=0.5 (unknown fill rates)
IA -> IA: dhtCache = selected (max 10)\n→ used for Suggestions in BuildHeartbeatResponse
end
== Node-side discovery at bootstrap ==
NodeB -> NodeB: ConnectToIndexers → seeds added\nSendHeartbeat started
NodeB -> NodeB: proactive goroutine (after 5s warmup)
alt discoveryDHT == nil (pure node, no IndexerService)
NodeB -> DHT: initNodeDHT(h, seeds)\n← DHT client mode, bootstrapped on the seeds
end
NodeB -> DHT: DiscoverIndexersFromDHT(h, discoveryDHT, need+extra)
DHT --> NodeB: []AddrInfo candidates
NodeB -> NodeB: Filter out self\nSelectByFillRate(candidates, fillRates, need)\n→ weighting w(F) = F×(1-F)\n F=0.2 → w=0.16 (very likely)\n F=0.5 → w=0.25 (max)\n F=0.8 → w=0.16 (unlikely)\n→ /24 diversity filter
loop For each selected candidate
NodeB -> NodeB: Indexers.SetAddr(key, &addrInfo)\nNudgeIt() → immediate heartbeat
end
note over NodeB: Pool enriched beyond the seeds.\nScoring starts at the first heartbeat.\nSeeds stay IsSeed=true (stickiness).
@enduml
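A small sketch of the fill-rate weighting; `w(F) = F×(1-F)` and the 0.5 prior for unknown fill rates come from the diagram, while the /24 subnet diversity filter of the real `SelectByFillRate` is omitted:

```go
package main

import (
	"fmt"
	"sort"
)

// weight favours half-full indexers: w(0.2) ≈ 0.16, w(0.5) = 0.25, w(0.8) ≈ 0.16.
func weight(fillRate float64, known bool) float64 {
	if !known {
		fillRate = 0.5 // prior when the fill rate is unknown
	}
	return fillRate * (1 - fillRate)
}

// selectByFillRate keeps up to n candidates, heaviest weight first.
func selectByFillRate(candidates []string, fillRates map[string]float64, n int) []string {
	sort.SliceStable(candidates, func(i, j int) bool {
		fi, oki := fillRates[candidates[i]]
		fj, okj := fillRates[candidates[j]]
		return weight(fi, oki) > weight(fj, okj)
	})
	if len(candidates) > n {
		candidates = candidates[:n]
	}
	return candidates
}

func main() {
	fmt.Println(weight(0.2, true), weight(0.5, true), weight(0.8, true))
	// IC (f=0.5) comes first, then whichever of IA/IB wins the 0.16 tie.
	fmt.Println(selectByFillRate([]string{"IA", "IB", "IC"},
		map[string]float64{"IA": 0.2, "IB": 0.8, "IC": 0.5}, 2))
}
```

The diagram phrases the weights as likelihoods, which suggests a weighted random draw in the real `SelectByFillRate`; the plain sort above only illustrates the shape of `w(F)`.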

View File

@@ -0,0 +1,41 @@
@startuml hb_failure_evict
title HeartbeatFailure → evictPeer → TriggerConsensus or DHT replenish
participant "Node A" as NodeA
participant "Indexer X\n(failing)" as IX
participant "Indexer Y\n(voter)" as IY
participant "Indexer Z\n(voter)" as IZ
participant "DHT" as DHT
participant "Indexer NEW\n(candidate)" as INEW
note over NodeA: SendHeartbeat tick — Indexer X in the pool
NodeA -> IX: stream.Encode(Heartbeat{...})
IX -->x NodeA: timeout / transport error
NodeA -> NodeA: HeartbeatFailure(h, proto, dir, addr_X, info_X, isIndexerHB=true, maxPool)
NodeA -> NodeA: evictPeer(dir, addr_X, id_X, proto)\n→ Streams.Delete(proto, &id_X)\n→ DeleteAddr(addr_X)\n→ DeleteScore(addr_X)\n→ voters = remaining AddrInfos
NodeA -> NodeA: poolSize = len(dir.GetAddrs())
alt poolSize == 0
NodeA -> NodeA: reconnectToSeeds()\n→ re-injects IndexerAddresses (IsSeed=true)
alt seeds added
NodeA -> NodeA: need = maxPool\nNudgeIt() → immediate tick
else no seed configured or seeds unreachable
NodeA -> NodeA: go retryUntilSeedResponds()\n(backoff 10s→5min, panic if IndexerAddresses is empty)
end
else poolSize > 0 AND len(voters) > 0
NodeA -> NodeA: go TriggerConsensus(h, voters, need)
NodeA -> IY: stream GET → GetValue{Key: candidate_DID}
IY --> NodeA: GetResponse{Found, Records}
NodeA -> IZ: stream GET → GetValue{Key: candidate_DID}
IZ --> NodeA: GetResponse{Found, Records}
note over NodeA: Quorum check:\nfound=true AND lastSeen ≤ 2×interval\nAND lastScore ≥ 30\n→ majority → INEW admitted
NodeA -> NodeA: Indexers.SetAddr(addr_NEW, &INEW_AddrInfo)\nIndexers.SetScore(addr_NEW, Score{IsSeed:false})\nNudgeIt()
else poolSize > 0 AND len(voters) == 0
NodeA -> DHT: go replenishIndexersFromDHT(h, need)\nDiscoverIndexersFromDHT → SelectByFillRate\n→ add to Indexers Directory
end
@enduml
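The three recovery branches collapse into one switch; the hook functions below are placeholders for `reconnectToSeeds`, `TriggerConsensus` and the DHT replenish, not their real signatures:

```go
package main

import "fmt"

// onHeartbeatFailure routes a failed indexer heartbeat exactly as in the diagram:
// empty pool → seeds, remaining voters → consensus, otherwise DHT replenish.
func onHeartbeatFailure(poolSize, voters, need int,
	reconnectToSeeds func(), triggerConsensus func(need int), replenishFromDHT func(need int)) {

	switch {
	case poolSize == 0:
		reconnectToSeeds()
	case voters > 0:
		go triggerConsensus(need)
	default:
		go replenishFromDHT(need)
	}
}

func main() {
	done := make(chan string, 1)
	onHeartbeatFailure(2, 2, 1,
		func() { done <- "seeds" },
		func(int) { done <- "consensus" },
		func(int) { done <- "dht" })
	fmt.Println(<-done) // "consensus": indexers remain in the pool, so they vote
}
```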

View File

@@ -0,0 +1,46 @@
@startuml hb_last_indexer
title Protection last-indexer → reconnectToSeeds → retryUntilSeedResponds
participant "Node A" as NodeA
participant "Indexer LAST\n(seul restant)" as IL
participant "Seed Indexer\n(config)" as SEED
participant "DHT" as DHT
note over NodeA: Pool = 1 indexer (LAST)\nIsSeed=false, score low for a long time
== Score-based eviction attempt ==
NodeA -> NodeA: score < minScore\nAND TotalOnline ≥ 2×interval\nAND !IsSeed\nAND len(pool) > 1 ← FALSE: pool == 1
note over NodeA: Active guard: len(pool) == 1\n→ score-based eviction BLOCKED\nLAST stays in the pool
== Network failure (heartbeat fail) ==
NodeA -> IL: stream.Encode(Heartbeat{...})
IL -->x NodeA: timeout
NodeA -> NodeA: HeartbeatFailure → evictPeer(LAST)\npoolSize = 0
NodeA -> NodeA: reconnectToSeeds()\n→ parse IndexerAddresses (conf)\n→ SetAddr + SetScore(IsSeed=true) for each seed
alt seeds added (IndexerAddresses not empty)
NodeA -> NodeA: NudgeIt() → immediate tick
NodeA -> SEED: Heartbeat{...} (via SendHeartbeat nudge)
SEED --> NodeA: HeartbeatResponse{fillRate, ...}
note over NodeA: Pool restored via the seeds.\nProactive DHT discovery resumes.
else IndexerAddresses empty
NodeA -> NodeA: go retryUntilSeedResponds()
note over NodeA: immediate panic:\n"pool is empty and no seed indexers configured"\n→ process stops
end
== retryUntilSeedResponds (if seeds do not respond) ==
loop exponential backoff (10s → 20s → ... → 5min)
NodeA -> NodeA: time.Sleep(backoff)
NodeA -> NodeA: len(Indexers.GetAddrs()) > 0?\n→ yes: return (someone refilled the pool)
NodeA -> NodeA: reconnectToSeeds()
alt pool > 0 after reconnect
NodeA -> NodeA: NudgeIt()\nDHT.Bootstrap(ctx, 15s)
note over NodeA: Exit the loop.\nNormal heartbeat resumes.
end
end
@enduml
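An illustrative version of the backoff loop (starting at 10s, doubling up to a 5min cap, returning as soon as the pool is non-empty); the real `retryUntilSeedResponds` also panics when `IndexerAddresses` is empty, which is left out here:

```go
package main

import (
	"fmt"
	"time"
)

// retryUntilSeedResponds sleeps with exponential backoff and keeps calling
// reconnectToSeeds until the indexer pool is non-empty again.
func retryUntilSeedResponds(initial, max time.Duration, poolSize func() int, reconnectToSeeds func()) {
	backoff := initial
	for {
		time.Sleep(backoff)
		if poolSize() > 0 {
			return // someone else already refilled the pool
		}
		reconnectToSeeds()
		if poolSize() > 0 {
			return // a seed answered: normal heartbeat resumes
		}
		backoff *= 2
		if backoff > max {
			backoff = max
		}
	}
}

func main() {
	n := 0
	// The real loop uses initial=10s and max=5min; shortened here so the demo returns quickly.
	retryUntilSeedResponds(10*time.Millisecond, time.Second,
		func() int { return n },
		func() { n = 1; fmt.Println("seed reconnected") })
}
```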

60
go.mod
View File

@@ -1,65 +1,73 @@
module oc-discovery
go 1.24.6
go 1.25.0
require (
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150531-ef916fe2d995
github.com/beego/beego v1.12.13
github.com/beego/beego/v2 v2.3.8
github.com/go-redis/redis v6.15.9+incompatible
github.com/smartystreets/goconvey v1.7.2
github.com/tidwall/gjson v1.17.3
cloud.o-forge.io/core/oc-lib v0.0.0-20260311072518-933b7147e908
github.com/ipfs/go-cid v0.6.0
github.com/libp2p/go-libp2p v0.47.0
github.com/libp2p/go-libp2p-record v0.3.1
github.com/multiformats/go-multiaddr v0.16.1
github.com/multiformats/go-multihash v0.2.3
)
require (
github.com/beego/beego/v2 v2.3.8 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/dunglas/httpsfv v1.1.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/filecoin-project/go-clock v0.1.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/boxo v0.35.2 // indirect
github.com/ipfs/go-cid v0.6.0 // indirect
github.com/ipfs/go-datastore v0.9.0 // indirect
github.com/ipfs/go-log/v2 v2.9.1 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/koron/go-ssdp v0.0.6 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p v0.47.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-record v0.3.1 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.4.0 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/miekg/dns v1.1.68 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.16.1 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.10.0 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
github.com/multiformats/go-varint v0.1.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
@@ -89,6 +97,7 @@ require (
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel v1.39.0 // indirect
go.opentelemetry.io/otel/metric v1.39.0 // indirect
@@ -99,13 +108,28 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.1 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/oauth2 v0.32.0 // indirect
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 // indirect
golang.org/x/term v0.39.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.41.0 // indirect
gonum.org/v1/gonum v0.17.0 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/api v0.35.1 // indirect
k8s.io/apimachinery v0.35.1 // indirect
k8s.io/client-go v0.35.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)
require (
@@ -117,13 +141,10 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.27.0 // indirect
github.com/golang/snappy v1.0.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c // indirect
github.com/google/uuid v1.6.0
github.com/goraz/onion v0.1.3 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.37.1
github.com/libp2p/go-libp2p-pubsub v0.15.0
@@ -139,13 +160,8 @@ require (
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/robfig/cron v1.2.0 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rs/zerolog v1.34.0 // indirect
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
github.com/smartystreets/assertions v1.2.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect

362
go.sum
View File

@@ -1,212 +1,141 @@
cloud.o-forge.io/core/oc-lib v0.0.0-20250108155542-0f4adeea86be h1:1Yf8ihUxXjOEPqcfgtXJpJ/slxBUHhf7AgS7DZI3iUk=
cloud.o-forge.io/core/oc-lib v0.0.0-20250108155542-0f4adeea86be/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
cloud.o-forge.io/core/oc-lib v0.0.0-20250603080047-03dea551315b h1:yfXDZ0Pw5xTWstsbZWS+MV7G3ZTSvOCTwWQJWRn4Z5k=
cloud.o-forge.io/core/oc-lib v0.0.0-20250603080047-03dea551315b/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
cloud.o-forge.io/core/oc-lib v0.0.0-20250604083300-387785b40cb0 h1:iEm/Rf9I0OSCcncuFy61YOSZ3jdRlhJ/oLD97Pc2pCQ=
cloud.o-forge.io/core/oc-lib v0.0.0-20250604083300-387785b40cb0/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27 h1:iogk6pV3gybzQDBXMI6Qd/jvSA1h+3oRE+vLl1MRjew=
cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260126120055-055e6c70cdd7 h1:LAK86efqe2HNV1Tkym1TpvzL1Xsj3F0ClsK/snfejD0=
cloud.o-forge.io/core/oc-lib v0.0.0-20260126120055-055e6c70cdd7/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260127143728-3c052bf16572 h1:jrUHgs4DqNWLnLcb5nd4lrJim77+aGkJFACUfMogiu8=
cloud.o-forge.io/core/oc-lib v0.0.0-20260127143728-3c052bf16572/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128140632-d098d253d8e2 h1:B3TO9nXdpGuPXL4X3QFrRMJ1C4zXCQlLh4XR9aSZoKg=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128140632-d098d253d8e2/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128140807-1c9d7b63c0b3 h1:zAT4ZulAaX+l28QdCMvuXh5XQxn+fU8x6YNJ1zmA7+Q=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128140807-1c9d7b63c0b3/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128152242-743f4a6ff742 h1:vGlUqBhj3G5hvskL1NzfecKCUMH8bL3xx7JkLpv/04M=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128152242-743f4a6ff742/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128152919-7911cf29def8 h1:HT1+PP04wu5DcQ5PA3LtSJ5PcWEyL4FlZB62+v9eLWo=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128152919-7911cf29def8/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128154447-d26789d64e33 h1:WdmHeRtEWV3RsXaEe4HnItGNYLFvMNFggfq9/KtPho0=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128154447-d26789d64e33/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128160440-c0d89ea9e1e8 h1:h7VHJktaTT8TxO4ld3Xjw3LzMsivr3m7mzbNxb44zes=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128160440-c0d89ea9e1e8/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128162702-97cf629e27ec h1:/uvrtEt7A5rwqFPHH8yjujlC33HMjQHhWDIK6I08DrA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128162702-97cf629e27ec/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260129121215-c1519f6b26b8 h1:gvUbTwHnYM0Ezzvoa9ylTt+o1lAhS0U79OogbsZ+Pl8=
cloud.o-forge.io/core/oc-lib v0.0.0-20260129121215-c1519f6b26b8/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260129122033-186ba3e689c7 h1:NRFGRqN+j5g3DrtXMYN5T5XSYICG+OU2DisjBdID3j8=
cloud.o-forge.io/core/oc-lib v0.0.0-20260129122033-186ba3e689c7/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203074447-30e6c9a6183c h1:c19lIseiUk5Hp+06EowfEbMWH1pK8AC/hvQ4ryWgJtY=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203074447-30e6c9a6183c/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150123-4258f6b58083 h1:nKiU4AfeX+axS4HkaX8i2PJyhSFfRJvzT+CgIv6Jl2o=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150123-4258f6b58083/go.mod h1:T0UCxRd8w+qCVVC0NEyDiWIGC5ADwEbQ7hFcvftd4Ks=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150531-ef916fe2d995 h1:ZDRvnzTTNHgMm5hYmseHdEPqQ6rn/4v+P9f/JIxPaNw=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150531-ef916fe2d995/go.mod h1:T0UCxRd8w+qCVVC0NEyDiWIGC5ADwEbQ7hFcvftd4Ks=
cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa h1:1wCpI4dwN1pj6MlpJ7/WifhHVHmCE4RU+9klwqgo/bk=
cloud.o-forge.io/core/oc-lib v0.0.0-20260304145747-e03a0d3dd0aa/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260311072518-933b7147e908 h1:1jz3xI/u2FzCG8phY7ShqADrmCj0mlrdjbdNUosSwgs=
cloud.o-forge.io/core/oc-lib v0.0.0-20260311072518-933b7147e908/go.mod h1:+ENuvBfZdESSvecoqGY/wSvRlT3vinEolxKgwbOhUpA=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
github.com/beego/beego v1.12.13 h1:g39O1LGLTiPejWVqQKK/TFGrroW9BCZQz6/pf4S8IRM=
github.com/beego/beego v1.12.13/go.mod h1:QURFL1HldOcCZAxnc1cZ7wrplsYR5dKPHFjmk6WkLAs=
github.com/beego/beego/v2 v2.3.1 h1:7MUKMpJYzOXtCUsTEoXOxsDV/UcHw6CPbaWMlthVNsc=
github.com/beego/beego/v2 v2.3.1/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc=
github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg=
github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkYwXMUU0OhQqGvsY2Bvgr4j6jfT699wyZKQ=
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542/go.mod h1:kSeGC/p1AbBiEp5kat81+DSQrZenVBZXklMLaELspWU=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q=
github.com/biter777/countries v1.7.5/go.mod h1:1HSpZ526mYqKJcpT5Ti1kcGQ0L0SrXWIaptUWjFfv2E=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/casbin/casbin v1.7.0/go.mod h1:c67qKN6Oum3UF5Q1+BByfFxkwKvhwW57ITjqwtzR1KE=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/couchbase/go-couchbase v0.0.0-20201216133707-c04035124b17/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A=
github.com/couchbase/gomemcached v0.1.2-0.20201224031647-c432ccf49f32/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo=
github.com/couchbase/goutils v0.0.0-20210118111533-e33d3ffb5401/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/dunglas/httpsfv v1.1.0 h1:Jw76nAyKWKZKFrpMMcL76y35tOpYHqQPzHQiwDvpe54=
github.com/dunglas/httpsfv v1.1.0/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/go-elasticsearch/v6 v6.8.5/go.mod h1:UwaDJsD3rWLM5rKNFzv9hgox93HoX8utj1kxD9aFUcI=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw=
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwcjo9m4iOqoijouPJ4bs=
github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU=
github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v1.4.5 h1:J7wGKdGu33ocBOhGy0z653k/lFKLFDPJMG8Gql0kxn4=
github.com/gabriel-vasile/mimetype v1.4.5/go.mod h1:ibHel+/kbxn9x2407k1izTA1S81ku1z/DlgOW2QE0M4=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/glendc/gopher-json v0.0.0-20170414221815-dc4743023d0c/go.mod h1:Gja1A+xZ9BoviGJNA2E9vFkPjjsl+CoJxSXiQM1UXtw=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/goraz/onion v0.1.3 h1:KhyvbDA2b70gcz/d5izfwTiOH8SmrvV43AsVzpng3n0=
github.com/goraz/onion v0.1.3/go.mod h1:XEmz1XoBz+wxTgWB8NwuvRm4RAu3vKxvrmYtzK+XCuQ=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/ipfs/boxo v0.35.2 h1:0QZJJh6qrak28abENOi5OA8NjBnZM4p52SxeuIDqNf8=
github.com/ipfs/boxo v0.35.2/go.mod h1:bZn02OFWwJtY8dDW9XLHaki59EC5o+TGDECXEbe1w8U=
github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk=
github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA=
github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30=
github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ=
github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w=
github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-log/v2 v2.9.1 h1:3JXwHWU31dsCpvQ+7asz6/QsFJHqFr4gLgQ0FWteujk=
github.com/ipfs/go-log/v2 v2.9.1/go.mod h1:evFx7sBiohUN3AG12mXlZBw5hacBQld3ZPHrowlJYoo=
github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc=
github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o=
github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E=
github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -216,10 +145,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledisdb/ledisdb v0.0.0-20200510135210-d35789ec47e6/go.mod h1:n931TsDuKuq+uX4v1fulaMbA/7ZLLhjc85h7chZGBCQ=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
@@ -240,6 +167,8 @@ github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOU
github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E=
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI=
github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-netroute v0.4.0 h1:sZZx9hyANYUx9PZyqcgE/E1GUG3iEtTZHUEvdtXT7/Q=
@@ -249,9 +178,12 @@ github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8S
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marcopolo/simnet v0.0.4 h1:50Kx4hS9kFGSRIbrt9xUS3NJX33EyPqHVmpXvaKLqrY=
github.com/marcopolo/simnet v0.0.4/go.mod h1:tfQF1u2DmaB6WHODMtQaLtClEf3a296CKQLq5gAsIS0=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
@@ -259,10 +191,9 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
@@ -276,9 +207,13 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
@@ -308,29 +243,21 @@ github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOo
github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug=
github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/ogier/pflag v0.0.1/go.mod h1:zkFki7tvTa0tafRvTBIZTvzYyAu6kQhPZFnshFFPE+g=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pelletier/go-toml v1.0.1/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/peterh/liner v1.0.1-0.20171122030339-3681c2a91233/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
@@ -373,46 +300,17 @@ github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.0/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
@@ -421,28 +319,15 @@ github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SA
github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
github.com/quic-go/webtransport-go v0.10.0 h1:LqXXPOXuETY5Xe8ITdGisBzTYmUOy5eSj+9n4hLTjHI=
github.com/quic-go/webtransport-go v0.10.0/go.mod h1:LeGIXr5BQKE3UsynwVBeQrU1TPrbh73MGoC6jd+V7ow=
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg=
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 h1:v9ezJDHA1XGxViAUSIoO/Id7Fl63u6d0YmsAm+/p2hs=
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02/go.mod h1:RF16/A3L0xSa0oSERcnhd8Pu3IXSDZSK2gmGIMsttFE=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/siddontang/go v0.0.0-20170517070808-cb568a3e5cc0/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/siddontang/goredis v0.0.0-20150324035039-760763f78400/go.mod h1:DDcKzU3qCuvj/tPnimWSsZZzvk9qvkvrIL5naVBPh5s=
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/skarademir/naturalsort v0.0.0-20150715044055-69a5d87bef62/go.mod h1:oIdVclZaltY1Nf7OQUkg1/2jImBJ+ZfKZuDIRSwk3p0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
@@ -452,37 +337,32 @@ github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hg
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec/go.mod h1:QBvMkMya+gXctz3kmljlUCu/yB3GZ6oee+dUozsezQE=
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/goleveldb v0.0.0-20160425020131-cfa635847112/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
github.com/tidwall/gjson v1.17.3 h1:bwWLZU7icoKRG+C+0PNwIKC6FCJO/Q3p2pZvuP0jN94=
github.com/tidwall/gjson v1.17.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/ugorji/go v0.0.0-20171122102828-84cb69a8af83/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
github.com/wendal/errors v0.0.0-20181209125328-7f31f4b264ec/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
@@ -494,11 +374,6 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/gopher-lua v0.0.0-20171031051903-609c9cd26973/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8=
go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
@@ -513,6 +388,8 @@ go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -521,25 +398,19 @@ go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
@@ -552,11 +423,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -568,43 +436,22 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -618,15 +465,10 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 h1:O1cMQHRfwNpDfDJerqRoE2oD+AFlyid87D40L/OkkJo=
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -634,6 +476,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -642,12 +486,6 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
@@ -668,38 +506,42 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=

1
logs.txt Normal file

File diff suppressed because one or more lines are too long

13
main.go
View File

@@ -2,7 +2,6 @@ package main
import (
"context"
"fmt"
"log"
"oc-discovery/conf"
"oc-discovery/daemons/node"
@@ -21,34 +20,34 @@ func main() {
oclib.InitDaemon(appname)
// get the right config file
o := oclib.GetConfLoader()
o := oclib.GetConfLoader(appname)
conf.GetConfig().Name = o.GetStringDefault("NAME", "opencloud-demo")
conf.GetConfig().Hostname = o.GetStringDefault("HOSTNAME", "127.0.0.1")
conf.GetConfig().PSKPath = o.GetStringDefault("PSK_PATH", "./psk/psk.key")
conf.GetConfig().NodeEndpointPort = o.GetInt64Default("NODE_ENDPOINT_PORT", 4001)
conf.GetConfig().PublicKeyPath = o.GetStringDefault("PUBLIC_KEY_PATH", "./pem/public.pem")
conf.GetConfig().PrivateKeyPath = o.GetStringDefault("PRIVATE_KEY_PATH", "./pem/private.pem")
conf.GetConfig().IndexerAddresses = o.GetStringDefault("INDEXER_ADDRESSES", "")
conf.GetConfig().PeerIDS = o.GetStringDefault("PEER_IDS", "")
conf.GetConfig().NodeMode = o.GetStringDefault("NODE_MODE", "node")
conf.GetConfig().MinIndexer = o.GetIntDefault("MIN_INDEXER", 1)
conf.GetConfig().MaxIndexer = o.GetIntDefault("MAX_INDEXER", 5)
ctx, stop := signal.NotifyContext(
context.Background(),
os.Interrupt,
syscall.SIGTERM,
)
defer stop()
fmt.Println(conf.GetConfig().NodeMode)
isNode := strings.Contains(conf.GetConfig().NodeMode, "node")
isIndexer := strings.Contains(conf.GetConfig().NodeMode, "indexer")
if n, err := node.InitNode(isNode, isIndexer); err != nil {
panic(err)
} else {
<-ctx.Done() // 👈 the only blocking point
<-ctx.Done() // the only blocking point
log.Println("shutting down")
n.Close()
}
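
For reference, the main.go change above ties the daemon's lifetime to a signal-aware context: the process blocks on ctx.Done() and only releases resources once SIGINT or SIGTERM arrives. The snippet below is a minimal standalone sketch of that graceful-shutdown pattern, using only the Go standard library; startDaemon and the daemon type are hypothetical stand-ins for node.InitNode and the handle it returns, not part of the oc-discovery codebase.

    package main

    import (
        "context"
        "log"
        "os"
        "os/signal"
        "syscall"
    )

    // daemon is a hypothetical stand-in for the handle returned by node.InitNode;
    // Close() releases whatever the daemon holds (network listeners, goroutines, ...).
    type daemon struct{}

    func (d *daemon) Close() { log.Println("resources released") }

    // startDaemon is a placeholder for the real initialisation (libp2p host, DHT, etc.).
    func startDaemon() (*daemon, error) {
        return &daemon{}, nil
    }

    func main() {
        // Bind SIGINT/SIGTERM to context cancellation so Ctrl-C or a Kubernetes
        // pod termination triggers an orderly shutdown instead of an abrupt kill.
        ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
        defer stop()

        d, err := startDaemon()
        if err != nil {
            log.Fatal(err)
        }

        <-ctx.Done() // the only blocking point: wait for a shutdown signal
        log.Println("shutting down")
        d.Close()
    }

The design choice mirrors the diff: keeping a single blocking point on ctx.Done() means every background component started by the daemon can watch the same context and wind down together when the signal arrives.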