Compare commits
31 Commits
feature/dh ... feature/no

| SHA1 |
|---|
| 7f951afd41 |
| fa341494d9 |
| 29b26d366e |
| 46dee0a6cb |
| 2108df5283 |
| 380de4c80b |
| 83285c2ab5 |
| edcfecd24b |
| 80117ee36f |
| 780a0c530d |
| d0af40f4c7 |
| 83cef6e6f6 |
| 3751ec554d |
| ef3d998ead |
| 79aa3cc2b3 |
| 779e36aaef |
| 572da29fd4 |
| 3eae5791a1 |
| 88fd05066c |
| 0250c3b339 |
| 6a5ffb9a92 |
| fa914958b6 |
| 1c0b2b4312 |
| 631e2846fe |
| d985d8339a |
| ea14ad3933 |
| 2e31df89c2 |
| 425cbdfe7d |
| 8ee5b84e21 |
| 552bb17e2b |
| 88e29073a2 |
13 Dockerfile
@@ -21,11 +21,6 @@ RUN go mod download
 FROM golang:alpine AS builder
-ARG CONF_NUM
-
-# Fail fast if CONF_NUM missing
-RUN test -n "$CONF_NUM"
-
 RUN apk add --no-cache git

 WORKDIR /oc-discovery

 # Reuse Go cache
@@ -55,13 +50,13 @@ WORKDIR /app

 RUN mkdir ./pem

-COPY --from=builder /app/extracted/pem/private${CONF_NUM}.pem ./pem/private.pem
+COPY --from=builder /app/extracted/pem/private${CONF_NUM:-1}.pem ./pem/private.pem
 COPY --from=builder /app/extracted/psk ./psk
-COPY --from=builder /app/extracted/pem/public${CONF_NUM}.pem ./pem/public.pem
+COPY --from=builder /app/extracted/pem/public${CONF_NUM:-1}.pem ./pem/public.pem

 COPY --from=builder /app/extracted/oc-discovery /usr/bin/oc-discovery
-COPY --from=builder /app/extracted/docker_discovery${CONF_NUM}.json /etc/oc/discovery.json
+COPY --from=builder /app/extracted/docker_discovery${CONF_NUM:-1}.json /etc/oc/discovery.json

-EXPOSE 400${CONF_NUM}
+EXPOSE 400${CONF_NUM:-1}

 ENTRYPOINT ["oc-discovery"]
24 Makefile
@@ -10,15 +10,29 @@ clean:
 	rm -rf oc-discovery

 docker:
-	DOCKER_BUILDKIT=1 docker build -t oc/oc-discovery:0.0.1 -f Dockerfile .
-	docker tag oc/oc-discovery:0.0.1 oc/oc-discovery:latest
+	DOCKER_BUILDKIT=1 docker build --build-arg CONF_NUM=5 -t oc-discovery_1 -f Dockerfile .
+	docker tag oc-discovery_1 opencloudregistry/oc-discovery_1:latest
+	DOCKER_BUILDKIT=1 docker build --build-arg CONF_NUM=2 -t oc-discovery_2 -f Dockerfile .
+	docker tag oc-discovery_2 opencloudregistry/oc-discovery_2:latest
+	DOCKER_BUILDKIT=1 docker build --build-arg CONF_NUM=3 -t oc-discovery_3 -f Dockerfile .
+	docker tag oc-discovery_3 opencloudregistry/oc-discovery_3:latest
+	DOCKER_BUILDKIT=1 docker build --build-arg CONF_NUM=4 -t oc-discovery_4 -f Dockerfile .
+	docker tag oc-discovery_4 opencloudregistry/oc-discovery_4:latest
+	DOCKER_BUILDKIT=1 docker build --build-arg CONF_NUM=6 -t oc-discovery_6 -f Dockerfile .
+	docker tag oc-discovery_6 opencloudregistry/oc-discovery_6:latest

 publish-kind:
-	kind load docker-image oc/oc-discovery:0.0.1 --name opencloud
+	kind load docker-image opencloudregistry/oc-discovery:latest --name opencloud

 publish-registry:
-	@echo "TODO"
+	docker push opencloudregistry/oc-discovery_1:latest
+	docker push opencloudregistry/oc-discovery_2:latest
+	docker push opencloudregistry/oc-discovery_3:latest
+	docker push opencloudregistry/oc-discovery_4:latest
+	docker push opencloudregistry/oc-discovery_6:latest

-all: docker publish-kind publish-registry
+all: docker publish-kind
+
+ci: docker publish-registry

 .PHONY: build run clean docker publish-kind publish-registry
31 README.md
@@ -14,3 +14,34 @@ If the default Swagger page is displayed instead of your API, change the url in swagger

 url: "swagger.json"


sequenceDiagram
    autonumber
    participant Dev as Developer / Owner
    participant IPFS as IPFS network
    participant CID as CID (file hash)
    participant Argo as Argo orchestrator
    participant CU as Compute Unit
    participant MinIO as MinIO storage

    %% 1. Add the file to IPFS
    Dev->>IPFS: Encrypts and adds file (algo/dataset)
    IPFS-->>CID: Generates a unique CID (file hash)
    Dev->>Dev: Stores the CID for future reference

    %% 2. Orchestration by Argo
    Argo->>CID: Requests the CID for a job
    CID-->>Argo: Provides the file (verified via hash)

    %% 3. Execution on the Compute Unit
    Argo->>CU: Deploys the job with the retrieved file
    CU->>CU: Verifies the hash (CID) for integrity
    CU->>CU: Runs the algo on the dataset

    %% 4. Result storage
    CU->>MinIO: Stores output (results) or logs
    CU->>IPFS: Optional: adds the output to IPFS (new CID)

    %% 5. Verification and traceability
    Dev->>IPFS: Verifies the output CID if needed
    CU->>Dev: Provides the result and a hash log
@@ -11,9 +11,34 @@ type Config struct {
	NodeEndpointPort int64
	IndexerAddresses string

	NanoIDS string
	PeerIDS string // TO REMOVE

	NodeMode string

	MinIndexer int
	MaxIndexer int
	// SearchTimeout is the max duration without a new result before the
	// distributed peer search stream is closed. Default: 5s.
	SearchTimeout int // seconds; 0 → use default (5)

	// Indexer connection burst guard: max new connections accepted within the window.
	// 0 → use defaults (20 new peers per 30s).
	MaxConnPerWindow int // default 20
	ConnWindowSecs   int // default 30

	// Per-node behavioral limits (sliding 60s window). 0 → use built-in defaults.
	MaxHBPerMinute      int // default 5
	MaxPublishPerMinute int // default 10
	MaxGetPerMinute     int // default 50

	// LocationGranularity controls how precisely this node discloses its position.
	// 0 = opt-out (no location published)
	// 1 = continent (±15°)
	// 2 = country (±3°) — default
	// 3 = region (±0.5°)
	// 4 = city (±0.05°)
	LocationGranularity int // default 2
}

var instance *Config
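Most of the new knobs share the zero-value convention spelled out in the comments: 0 means "fall back to the built-in default" (LocationGranularity is the exception, where 0 is an explicit opt-out). A minimal sketch of how a caller could resolve one of them — the helper name is hypothetical, not part of this diff:

	// effectiveSearchTimeout resolves the documented "0 → 5s" default.
	func effectiveSearchTimeout(c *Config) time.Duration {
		if c.SearchTimeout <= 0 {
			return 5 * time.Second // documented default
		}
		return time.Duration(c.SearchTimeout) * time.Second
	}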
294 daemons/node/common/common_cache.go (new file)
@@ -0,0 +1,294 @@
package common

import (
	"errors"
	"sync"
	"time"

	pp "github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

type Score struct {
	FirstContacted time.Time
	UptimeTracker  *UptimeTracker
	LastFillRate   float64
	Score          float64
	// IsSeed marks indexers that came from the IndexerAddresses static config.
	// Seeds are sticky: they are never evicted by the score threshold alone,
	// and heartbeat failures keep them in the pool for retry. A seed is only
	// removed once it sends SuggestMigrate and the node already has MinIndexer
	// non-seed alternatives.
	IsSeed bool
	// challenge bookkeeping (2-3 peers per batch, raw data returned by indexer)
	hbCount          int // heartbeats sent since last challenge batch
	nextChallenge    int // send challenges when hbCount reaches this (rand 1-10)
	challengeTotal   int // number of own-PeerID challenges sent (ground truth)
	challengeCorrect int // own PeerID found AND lastSeen within 2×interval
	// fill rate consistency: cross-check reported fillRate vs peerCount/maxNodes
	fillChecked    int
	fillConsistent int
	// BornAt stability
	LastBornAt    time.Time
	bornAtChanges int
	// DHT challenge
	dhtChecked      int
	dhtSuccess      int
	dhtBatchCounter int
	// Peer witnesses
	witnessChecked    int
	witnessConsistent int
}
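// UptimeTracker is used throughout this file but defined outside the diff.
// A minimal sketch of the fields and methods relied on here — an assumption
// for illustration, not the real implementation:
//
//	type UptimeTracker struct {
//		FirstSeen            time.Time
//		LastSeen             time.Time
//		TotalOnline          time.Duration // gap-capped time spent reachable
//		ConsecutiveFails     int
//		SuspectedAt          time.Time // zero value = not suspected
//		LastKnownIncarnation uint64
//	}
//
//	// RecordHeartbeat credits online time, capping long gaps so downtime
//	// between heartbeats is not counted as uptime ("gap-aware").
//	func (u *UptimeTracker) RecordHeartbeat() {
//		now := time.Now().UTC()
//		if !u.LastSeen.IsZero() {
//			gap := now.Sub(u.LastSeen)
//			if gap > 2*RecommendedHeartbeatInterval {
//				gap = 2 * RecommendedHeartbeatInterval
//			}
//			u.TotalOnline += gap
//		}
//		u.LastSeen = now
//	}
//
//	func (u *UptimeTracker) Uptime() time.Duration { return time.Since(u.FirstSeen) }
//
//	func (u *UptimeTracker) UptimeRatio() float64 {
//		life := time.Since(u.FirstSeen)
//		if life <= 0 {
//			return 1
//		}
//		return min(float64(u.TotalOnline)/float64(life), 1)
//	}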
// ComputeNodeSideScore computes the node's quality assessment of an indexer from raw metrics.
// All ratios are in [0,1]; the result is in [0,100].
//   - uptimeRatio      : gap-aware fraction of lifetime the indexer was reachable
//   - challengeAccuracy: own-PeerID challenges answered correctly (found + recent lastSeen)
//   - latencyScore     : 1 - RTT/maxRTT, clamped to [0,1]
//   - fillScore        : 1 - fillRate — prefer less-loaded indexers
//   - fillConsistency  : fraction of ticks where peerCount/maxNodes ≈ fillRate (±10%)
func (s *Score) ComputeNodeSideScore(latencyScore float64) float64 {
	uptime := s.UptimeTracker.UptimeRatio()
	challengeAccuracy := 1.0
	if s.challengeTotal > 0 {
		challengeAccuracy = float64(s.challengeCorrect) / float64(s.challengeTotal)
	}
	fillScore := 1.0 - s.LastFillRate
	fillConsistency := 1.0
	if s.fillChecked > 0 {
		fillConsistency = float64(s.fillConsistent) / float64(s.fillChecked)
	}
	witnessConsistency := 1.0
	if s.witnessChecked > 0 {
		witnessConsistency = float64(s.witnessConsistent) / float64(s.witnessChecked)
	}
	dhtSuccessRate := 1.0
	if s.dhtChecked > 0 {
		dhtSuccessRate = float64(s.dhtSuccess) / float64(s.dhtChecked)
	}
	base := ((0.20 * uptime) +
		(0.20 * challengeAccuracy) +
		(0.15 * latencyScore) +
		(0.10 * fillScore) +
		(0.10 * fillConsistency) +
		(0.15 * witnessConsistency) +
		(0.10 * dhtSuccessRate)) * 100
	// BornAt stability: each unexpected BornAt change penalises by 30%.
	bornAtPenalty := 1.0 - 0.30*float64(s.bornAtChanges)
	if bornAtPenalty < 0 {
		bornAtPenalty = 0
	}
	return base * bornAtPenalty
}
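// In formula form, the score computed above is
//
//	S = 100 · (0.20·u + 0.20·c + 0.15·ℓ + 0.10·(1−f) + 0.10·k + 0.15·w + 0.10·d) · max(0, 1 − 0.3·n)
//
// with u the uptime ratio, c the challenge accuracy, ℓ the latency score,
// f the last reported fill rate, k the fill-rate consistency, w the witness
// consistency, d the DHT success rate, and n the number of unexpected BornAt
// changes. Sanity check: a perfect indexer (u=c=ℓ=k=w=d=1, f=0, n=0) scores
// exactly 100, since the weights sum to 1.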
type Directory struct {
	MuAddr   sync.RWMutex
	MuScore  sync.RWMutex
	MuStream sync.RWMutex
	Addrs    map[string]*pp.AddrInfo
	Scores   map[string]*Score
	Nudge    chan struct{}
	Streams  ProtocolStream
}

func (d *Directory) ExistsScore(a string) bool {
	d.MuScore.RLock()
	defer d.MuScore.RUnlock()
	return d.Scores[a] != nil
}

// GetScore returns the live *Score so callers (e.g. the heartbeat tick and
// queryWitnesses) can update challenge counters in place; use SetScore to
// replace an entry wholesale.
func (d *Directory) GetScore(a string) *Score {
	d.MuScore.RLock()
	defer d.MuScore.RUnlock()
	return d.Scores[a]
}

func (d *Directory) GetScores() map[string]*Score {
	d.MuScore.RLock()
	defer d.MuScore.RUnlock()
	score := map[string]*Score{}
	for addr, s := range d.Scores {
		score[addr] = s
	}
	return score
}

func (d *Directory) DeleteScore(a string) {
	d.MuScore.Lock() // write lock: the map is mutated
	defer d.MuScore.Unlock()
	delete(d.Scores, a)
}

func (d *Directory) SetScore(addr string, score *Score) {
	d.MuScore.Lock()
	defer d.MuScore.Unlock()
	d.Scores[addr] = score
}

func (d *Directory) ExistsAddr(addrOrId string) bool {
	d.MuAddr.RLock()
	defer d.MuAddr.RUnlock()
	for addr, ai := range d.Addrs {
		if ai != nil && (addrOrId == ai.ID.String() || addrOrId == addr) {
			return true
		}
	}
	return false
}

func (d *Directory) GetAddr(addrOrId string) *pp.AddrInfo {
	d.MuAddr.RLock()
	defer d.MuAddr.RUnlock()
	for addr, ai := range d.Addrs {
		if ai != nil && (addrOrId == ai.ID.String() || addrOrId == addr) {
			aiCopy := *ai
			return &aiCopy
		}
	}
	return nil
}

func (d *Directory) DeleteAddr(a string) {
	d.MuAddr.Lock() // write lock: the map is mutated
	defer d.MuAddr.Unlock()
	delete(d.Addrs, a)
}

func (d *Directory) SetAddr(addr string, info *pp.AddrInfo) {
	d.MuAddr.Lock()
	defer d.MuAddr.Unlock()
	d.Addrs[addr] = info
}

func (d *Directory) GetAddrIDs() []pp.ID {
	d.MuAddr.RLock()
	defer d.MuAddr.RUnlock()
	indexers := make([]pp.ID, 0, len(d.Addrs))
	for _, ai := range d.Addrs {
		if ai != nil {
			indexers = append(indexers, ai.ID)
		}
	}
	return Shuffle(indexers)
}

func (d *Directory) GetAddrsStr() []string {
	d.MuAddr.RLock()
	defer d.MuAddr.RUnlock()
	indexers := make([]string, 0, len(d.Addrs))
	for s, ai := range d.Addrs {
		if ai != nil {
			indexers = append(indexers, s)
		}
	}

	return Shuffle(indexers)
}

type Entry struct {
	Addr string
	Info *pp.AddrInfo
}

func (d *Directory) GetAddrs() []Entry {
	d.MuAddr.RLock()
	defer d.MuAddr.RUnlock()
	indexers := make([]Entry, 0, len(d.Addrs))
	for addr, ai := range d.Addrs {
		if ai != nil {
			indexers = append(indexers, Entry{
				Addr: addr,
				Info: ai,
			})
		}
	}
	return Shuffle(indexers)
}

// NudgeIt signals the indexer heartbeat goroutine to fire immediately.
func (d *Directory) NudgeIt() {
	select {
	case d.Nudge <- struct{}{}:
	default: // nudge already pending, skip
	}
}

type ProtocolStream map[protocol.ID]map[pp.ID]*Stream

func (ps ProtocolStream) Get(protocol protocol.ID) map[pp.ID]*Stream {
	if ps[protocol] == nil {
		ps[protocol] = map[pp.ID]*Stream{}
	}

	return ps[protocol]
}

func (ps ProtocolStream) GetPerID(protocol protocol.ID, peerID pp.ID) *Stream {
	if ps[protocol] == nil {
		ps[protocol] = map[pp.ID]*Stream{}
	}
	return ps[protocol][peerID]
}

func (ps ProtocolStream) Add(protocol protocol.ID, peerID *pp.ID, s *Stream) error {
	if ps[protocol] == nil {
		ps[protocol] = map[pp.ID]*Stream{}
	}
	if peerID != nil {
		if s != nil {
			ps[protocol][*peerID] = s
		} else {
			return errors.New("unable to add stream: stream missing")
		}
	}
	return nil
}

// Delete closes and removes the stream for peerID under protocol; with a nil
// peerID it closes every stream registered for that protocol and drops the
// whole protocol entry.
func (ps ProtocolStream) Delete(protocol protocol.ID, peerID *pp.ID) {
	if streams, ok := ps[protocol]; ok {
		if peerID != nil && streams[*peerID] != nil && streams[*peerID].Stream != nil {
			streams[*peerID].Stream.Close()
			delete(streams, *peerID)
		} else {
			for _, v := range streams {
				if v.Stream != nil {
					v.Stream.Close()
				}
			}
			delete(ps, protocol)
		}
	}
}

var Indexers = &Directory{
	Addrs:   map[string]*pp.AddrInfo{},
	Scores:  map[string]*Score{},
	Nudge:   make(chan struct{}, 1),
	Streams: ProtocolStream{},
}
285 daemons/node/common/common_heartbeat.go (new file)
@@ -0,0 +1,285 @@
package common

import (
	"context"
	"encoding/json"
	"io"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	pp "github.com/libp2p/go-libp2p/core/peer"

	oclib "cloud.o-forge.io/core/oc-lib"
)

// MemberEventType is the SWIM membership event classification.
type MemberEventType string

const (
	MemberAlive   MemberEventType = "alive"
	MemberSuspect MemberEventType = "suspect"
	MemberDead    MemberEventType = "dead"
)

// MemberEvent is a SWIM membership event piggybacked on heartbeats (infection-style).
// HopsLeft starts at InitialEventHops and is decremented on each retransmission.
// Receivers discard events whose HopsLeft reaches 0 instead of forwarding them further.
// Deduplication is by (PeerID, Incarnation): a higher incarnation or higher-priority type wins.
type MemberEvent struct {
	Type        MemberEventType `json:"type"`
	PeerID      string          `json:"peer_id"`
	Incarnation uint64          `json:"incarnation"`
	HopsLeft    int             `json:"hops_left"`
}
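// NodeEventQueue is referenced throughout this changeset but defined outside
// it. A minimal sketch of the queue the call sites assume — dedup keyed by
// PeerID with higher incarnation winning, hop decrement on drain — purely
// illustrative, not the actual implementation (type priority between
// suspect/alive/dead is omitted for brevity):
//
//	type memberEventQueue struct {
//		mu     sync.Mutex
//		byPeer map[string]MemberEvent
//	}
//
//	func (q *memberEventQueue) Add(ev MemberEvent) {
//		q.mu.Lock()
//		defer q.mu.Unlock()
//		if q.byPeer == nil {
//			q.byPeer = map[string]MemberEvent{}
//		}
//		if cur, ok := q.byPeer[ev.PeerID]; !ok || ev.Incarnation > cur.Incarnation {
//			q.byPeer[ev.PeerID] = ev // higher incarnation wins
//		}
//	}
//
//	func (q *memberEventQueue) Drain(max int) []MemberEvent {
//		q.mu.Lock()
//		defer q.mu.Unlock()
//		out := make([]MemberEvent, 0, max)
//		for id, ev := range q.byPeer {
//			if len(out) == max {
//				break
//			}
//			delete(q.byPeer, id)
//			ev.HopsLeft-- // one hop consumed by this retransmission
//			if ev.HopsLeft >= 0 {
//				out = append(out, ev)
//			}
//		}
//		return out
//	}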
type Heartbeat struct {
	Name           string   `json:"name"`
	Stream         *Stream  `json:"stream"`
	DID            string   `json:"did"`
	PeerID         string   `json:"peer_id"`
	Timestamp      int64    `json:"timestamp"`
	IndexersBinded []string `json:"indexers_binded"`
	Score          float64
	// Record carries a fresh signed PeerRecord (JSON) so the receiving indexer
	// can republish it to the DHT without an extra round-trip.
	// Only set by nodes (not by indexers heartbeating other indexers).
	Record json.RawMessage `json:"record,omitempty"`
	// Need is how many more indexers this node wants (MaxIndexer - current pool size).
	// The receiving indexer uses this to know how many suggestions to return.
	// 0 means the pool is full — no suggestions needed unless SuggestMigrate.
	Need int `json:"need,omitempty"`
	// Challenges is a list of PeerIDs the node asks the indexer to spot-check.
	// It always includes the node's own PeerID (ground truth) + up to 2 additional
	// known peers. Nil means no challenge this tick.
	Challenges []string `json:"challenges,omitempty"`
	// ChallengeDID asks the indexer to retrieve this DID from the DHT (every 5th batch).
	ChallengeDID string `json:"challenge_did,omitempty"`
	// Referent marks this indexer as the node's designated search referent.
	// Only one indexer per node receives Referent=true at a time (the best-scored one).
	// The indexer stores the node in its referencedNodes for distributed search.
	Referent bool `json:"referent,omitempty"`
	// SuspectedIncarnation is set when this node currently suspects the target indexer.
	// If the value matches the indexer's own incarnation, the indexer increments its
	// incarnation and replies with the new value — this is the SWIM refutation signal.
	SuspectedIncarnation *uint64 `json:"suspected_incarnation,omitempty"`
	// MembershipEvents carries SWIM events piggybacked on this heartbeat.
	// Events are forwarded infection-style until HopsLeft reaches 0.
	MembershipEvents []MemberEvent `json:"membership_events,omitempty"`
}

// SearchPeerRequest is sent by a node to an indexer via ProtocolSearchPeer.
// The indexer broadcasts it on the GossipSub search mesh and streams results back.
type SearchPeerRequest struct {
	QueryID string `json:"query_id"`
	// At least one of PeerID, DID, Name must be set.
	PeerID string `json:"peer_id,omitempty"`
	DID    string `json:"did,omitempty"`
	Name   string `json:"name,omitempty"`
}

// SearchQuery is broadcast on TopicSearchPeer by the receiving indexer.
// EmitterID is the indexer's own PeerID — responding indexers open a
// ProtocolSearchPeerResponse stream back to it.
type SearchQuery struct {
	QueryID   string `json:"query_id"`
	PeerID    string `json:"peer_id,omitempty"`
	DID       string `json:"did,omitempty"`
	Name      string `json:"name,omitempty"`
	EmitterID string `json:"emitter_id"`
}

// Search results are sent by a responding indexer to the emitting indexer
// via ProtocolSearchPeerResponse, and forwarded by the emitting indexer to
// the node on the open ProtocolSearchPeer stream.

// SearchHit is a single peer found during distributed search.
type SearchHit struct {
	PeerID string `json:"peer_id"`
	DID    string `json:"did"`
	Name   string `json:"name"`
}

// ChallengeEntry is the indexer's raw answer for one challenged peer.
type ChallengeEntry struct {
	PeerID   string    `json:"peer_id"`
	Found    bool      `json:"found"`
	LastSeen time.Time `json:"last_seen,omitempty"` // zero if not found
}

// HeartbeatResponse carries raw metrics only — no pre-cooked score.
type HeartbeatResponse struct {
	FillRate   float64          `json:"fill_rate"`
	PeerCount  int              `json:"peer_count"`
	MaxNodes   int              `json:"max_nodes"` // capacity — lets the node cross-check fillRate
	BornAt     time.Time        `json:"born_at"`
	Challenges []ChallengeEntry `json:"challenges,omitempty"`
	// DHTFound / DHTPayload: response to a ChallengeDID request.
	DHTFound   bool            `json:"dht_found,omitempty"`
	DHTPayload json.RawMessage `json:"dht_payload,omitempty"`
	// Witnesses: random sample of connected nodes so the querying node can cross-check.
	Witnesses []pp.AddrInfo `json:"witnesses,omitempty"`
	// Suggestions: better indexers this indexer knows about via its DHT cache.
	// The node should open heartbeat connections to these (they join the Indexers pool).
	Suggestions []pp.AddrInfo `json:"suggestions,omitempty"`
	// SuggestMigrate: set when this indexer is overloaded (fill rate > threshold)
	// and is actively trying to hand the node off to the Suggestions list.
	// Seeds: the node de-stickies this indexer once it has MinIndexer non-seed alternatives.
	// Non-seeds: the node removes this indexer immediately if it has enough alternatives.
	SuggestMigrate bool `json:"suggest_migrate,omitempty"`
	// Incarnation is this indexer's current SWIM incarnation number.
	// It is incremented whenever the indexer refutes a suspicion signal.
	// The node tracks this to detect explicit refutations and to clear suspect state.
	Incarnation uint64 `json:"incarnation,omitempty"`
	// MembershipEvents carries SWIM events piggybacked on this response.
	// The node should forward them to its other indexers (infection-style).
	MembershipEvents []MemberEvent `json:"membership_events,omitempty"`
}

// ComputeIndexerScore computes a composite quality score [0, 100] for the connecting peer.
//   - uptimeRatio : fraction of tracked lifetime online (gap-aware) — peer reliability
//   - bpms        : bandwidth normalized to MaxExpectedMbps — link capacity
//   - diversity   : indexer's own /24 subnet diversity — network topology quality
//   - latencyScore: 1 - RTT/maxRoundTrip — link responsiveness
//   - fillRate    : fraction of indexer slots used (0=empty, 1=full) — a collective trust signal:
//     a fuller indexer has been chosen and retained by many peers, which is evidence of quality.
func (hb *Heartbeat) ComputeIndexerScore(uptimeRatio float64, bpms float64, diversity float64, latencyScore float64, fillRate float64) {
	hb.Score = ((0.20 * uptimeRatio) +
		(0.20 * bpms) +
		(0.20 * diversity) +
		(0.15 * latencyScore) +
		(0.25 * fillRate)) * 100
}
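// Written out, the composite above is
//
//	S = 100 · (0.20·u + 0.20·b + 0.20·v + 0.15·ℓ + 0.25·f)
//
// with u the uptime ratio, b the normalized bandwidth, v the subnet diversity,
// ℓ the latency score, and f the fill rate. Note the sign flip relative to the
// node-side score: there an emptier indexer is preferred (1 − fillRate), while
// here fullness counts in favour, as the collective trust signal described above.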
type HeartbeatInfo []struct {
	Info []byte `json:"info"`
}

// WitnessRequest is sent by a node to a peer to ask its view of a given indexer.
type WitnessRequest struct {
	IndexerPeerID string `json:"indexer_peer_id"`
}

// WitnessReport is returned by a peer in response to a WitnessRequest.
type WitnessReport struct {
	Seen     bool      `json:"seen"`
	BornAt   time.Time `json:"born_at,omitempty"`
	FillRate float64   `json:"fill_rate,omitempty"`
	Score    float64   `json:"score,omitempty"`
}

// HandleBandwidthProbe echoes back everything written on the stream, then closes.
// It is registered by all participants so the measuring side (the heartbeat receiver)
// can open a dedicated probe stream and read the round-trip latency + throughput.
func HandleBandwidthProbe(s network.Stream) {
	defer s.Close()
	s.SetDeadline(time.Now().Add(10 * time.Second))
	io.Copy(s, s) // echo every byte back to the sender
}

// HandleWitnessQuery answers a witness query: the caller wants to know
// what this node thinks of a given indexer (identified by its PeerID).
func HandleWitnessQuery(h host.Host, s network.Stream) {
	defer s.Close()
	s.SetDeadline(time.Now().Add(5 * time.Second))
	var req WitnessRequest
	if err := json.NewDecoder(s).Decode(&req); err != nil {
		return
	}
	report := WitnessReport{}
	for _, ai := range Indexers.GetAddrs() {
		if ai.Info == nil || ai.Info.ID.String() != req.IndexerPeerID {
			continue
		}
		if score := Indexers.GetScore(addrKey(*ai.Info)); score != nil {
			report.Seen = true
			report.BornAt = score.LastBornAt
			report.FillRate = score.LastFillRate
			report.Score = score.Score
		}
		break
	}
	json.NewEncoder(s).Encode(report)
}

// SupportsHeartbeat probes pid with a short-lived stream to verify it has
// a ProtocolHeartbeat handler (i.e. it is an indexer, not a plain node).
// Only protocol negotiation is performed — no data is sent.
// Returns false on any error, including "protocol not supported".
func SupportsHeartbeat(h host.Host, pid pp.ID) bool {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	s, err := h.NewStream(ctx, pid, ProtocolHeartbeat)
	if err != nil {
		return false
	}
	s.Reset()
	return true
}

// queryWitnesses contacts each witness in parallel, collects their view of the
// indexer, and updates score.witnessChecked / score.witnessConsistent.
// Called in a goroutine — must not hold any lock.
func queryWitnesses(h host.Host, indexerPeerID string, indexerBornAt time.Time, indexerFillRate float64, witnesses []pp.AddrInfo, score *Score) {
	logger := oclib.GetLogger()
	type result struct{ consistent bool }
	results := make(chan result, len(witnesses))

	for _, ai := range witnesses {
		if ai.ID == h.ID() {
			// Never query ourselves — skip and count as inconclusive.
			results <- result{}
			continue
		}
		go func(ai pp.AddrInfo) {
			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
			defer cancel()
			s, err := h.NewStream(ctx, ai.ID, ProtocolWitnessQuery)
			if err != nil {
				results <- result{}
				return
			}
			defer s.Close()
			s.SetDeadline(time.Now().Add(5 * time.Second))
			if err := json.NewEncoder(s).Encode(WitnessRequest{IndexerPeerID: indexerPeerID}); err != nil {
				results <- result{}
				return
			}
			var rep WitnessReport
			if err := json.NewDecoder(s).Decode(&rep); err != nil || !rep.Seen {
				results <- result{}
				return
			}
			// BornAt must be identical (fixed timestamp).
			bornAtOK := !rep.BornAt.IsZero() && rep.BornAt.Equal(indexerBornAt)
			// FillRate coherent within ±25% (it fluctuates normally).
			diff := rep.FillRate - indexerFillRate
			if diff < 0 {
				diff = -diff
			}
			fillOK := diff < 0.25
			consistent := bornAtOK && fillOK
			logger.Debug().
				Str("witness", ai.ID.String()).
				Bool("bornAt_ok", bornAtOK).
				Bool("fill_ok", fillOK).
				Msg("witness report")
			results <- result{consistent: consistent}
		}(ai)
	}

	checked, consistent := 0, 0
	for range witnesses {
		r := <-results
		checked++
		if r.consistent {
			consistent++
		}
	}

	if checked == 0 {
		return
	}
	score.witnessChecked += checked
	score.witnessConsistent += consistent
}
807 daemons/node/common/common_indexer_hb.go (new file)
@@ -0,0 +1,807 @@
package common

import (
	"context"
	"encoding/json"
	"fmt"
	"math/rand"
	"strings"
	"sync/atomic"
	"time"

	"oc-discovery/conf"

	oclib "cloud.o-forge.io/core/oc-lib"

	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	pp "github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

var TimeWatcher time.Time

// retryRunning guards against launching multiple retryUntilSeedResponds goroutines.
var retryRunning atomic.Bool

// suspectTimeout is the maximum time a peer can stay in suspect state before
// being declared dead and evicted. Aligned with 3 heartbeat intervals so the
// peer has at least 3 chances to respond or refute the suspicion signal.
const suspectTimeout = 3 * RecommendedHeartbeatInterval

func ConnectToIndexers(h host.Host, minIndexer int, maxIndexer int, recordFn ...func() json.RawMessage) error {
	TimeWatcher = time.Now().UTC()
	logger := oclib.GetLogger()

	// Bootstrap from the IndexerAddresses seed set.
	addresses := strings.Split(conf.GetConfig().IndexerAddresses, ",")
	if len(addresses) > maxIndexer {
		addresses = addresses[0:maxIndexer]
	}
	for _, indexerAddr := range addresses {
		indexerAddr = strings.TrimSpace(indexerAddr)
		if indexerAddr == "" {
			continue
		}
		ad, err := pp.AddrInfoFromString(indexerAddr)
		if err != nil {
			logger.Err(err)
			continue
		}
		key := ad.ID.String()
		Indexers.SetAddr(key, ad)
		// Pre-create the score entry with IsSeed=true so the sticky flag is set before
		// the first heartbeat tick (lazy creation in doTick would lose the flag).
		if !Indexers.ExistsScore(key) {
			Indexers.SetScore(key, &Score{
				FirstContacted: time.Now().UTC(),
				UptimeTracker:  &UptimeTracker{FirstSeen: time.Now().UTC()},
				nextChallenge:  rand.Intn(10) + 1,
				IsSeed:         true,
			})
		}
	}
	seeds := Indexers.GetAddrs()
	indexerCount := len(seeds)

	if indexerCount < minIndexer {
		return fmt.Errorf("not enough indexers configured (%d < %d): the node would be isolated", indexerCount, minIndexer)
	}

	// Start the long-lived heartbeat to seed indexers. The single goroutine follows
	// all subsequent changes to the Indexers pool.
	SendHeartbeat(context.Background(), ProtocolHeartbeat, conf.GetConfig().Name,
		h, Indexers, 20*time.Second, maxIndexer, recordFn...)

	// Watch for inbound connections: if a peer connects to us and our pool has
	// room, probe it first to confirm it supports ProtocolHeartbeat (i.e. it is
	// an indexer). Plain nodes don't register the handler — the negotiation fails
	// instantly, so we never pollute the pool with non-indexer peers.
	h.Network().Notify(&network.NotifyBundle{
		ConnectedF: func(n network.Network, c network.Conn) {
			if c.Stat().Direction != network.DirInbound {
				return
			}
			if len(Indexers.GetAddrs()) >= maxIndexer {
				return
			}
			peerID := c.RemotePeer()
			if Indexers.ExistsAddr(peerID.String()) {
				return
			}
			// Probe in a goroutine — ConnectedF must not block.
			go func(pid pp.ID) {
				if !SupportsHeartbeat(h, pid) {
					return // plain node, skip
				}
				if len(Indexers.GetAddrs()) >= maxIndexer {
					return
				}
				if Indexers.ExistsAddr(pid.String()) {
					return
				}
				addrs := h.Peerstore().Addrs(pid)
				if len(addrs) == 0 {
					return
				}
				ai := FilterLoopbackAddrs(pp.AddrInfo{ID: pid, Addrs: addrs})
				if len(ai.Addrs) == 0 {
					return
				}
				adCopy := ai
				Indexers.SetAddr(pid.String(), &adCopy)
				Indexers.NudgeIt()
				log := oclib.GetLogger()
				log.Info().Str("peer", pid.String()).
					Msg("[pool] inbound indexer peer added as candidate")
			}(peerID)
		},
	})

	// Proactive DHT upgrade: once seeds are connected and the DHT routing table
	// is warm, discover better indexers and add them to the pool alongside the seeds.
	// Seeds stay as guaranteed anchors; scoring will demote poor performers over time.
	go func(seeds []Entry) {
		// Let seed connections establish and the DHT routing table warm up.
		time.Sleep(5 * time.Second)
		// For pure nodes (no IndexerService), spin up a lightweight DHT client.
		if discoveryDHT == nil {
			if len(seeds) == 0 {
				return
			}
			initNodeDHT(h, seeds)
		}
		if discoveryDHT == nil {
			return
		}
		current := len(Indexers.GetAddrs())
		need := maxIndexer - current
		if need <= 0 {
			need = maxIndexer / 2 // diversify even when the pool is already at capacity
		}
		logger.Info().Int("need", need).Msg("[dht] proactive indexer discovery from DHT")
		replenishIndexersFromDHT(h, need)
	}(seeds)

	return nil
}

// reconnectToSeeds re-adds the configured seed indexers to the Indexers pool as
// sticky fallback entries. Called when the pool drops to zero so the node
// never becomes completely isolated.
func reconnectToSeeds() {
	logger := oclib.GetLogger()
	logger.Warn().Msg("[pool] all indexers lost, reconnecting to configured seeds")
	addresses := strings.Split(conf.GetConfig().IndexerAddresses, ",")
	for _, addrStr := range addresses {
		addrStr = strings.TrimSpace(addrStr)
		if addrStr == "" {
			continue
		}
		ad, err := pp.AddrInfoFromString(addrStr)
		if err != nil {
			continue
		}
		key := ad.ID.String()
		Indexers.SetAddr(key, ad)
		if score := Indexers.GetScore(key); score == nil {
			Indexers.SetScore(key, &Score{
				FirstContacted: time.Now().UTC(),
				UptimeTracker:  &UptimeTracker{FirstSeen: time.Now().UTC()},
				nextChallenge:  rand.Intn(10) + 1,
				IsSeed:         true,
			})
		} else {
			// Restore the sticky flag so the seed is not immediately re-ejected.
			score.IsSeed = true
		}
	}
}

// retryUntilSeedResponds loops with exponential backoff until at least one
// configured seed is reachable again. Once seeds are back in the pool it
// nudges the heartbeat loop and lets the normal DHT upgrade path take over.
// Should be called in a goroutine — it blocks until the situation resolves.
// If no seeds are configured it simply waits for an inbound indexer to refill the pool.
func retryUntilSeedResponds() {
	if !retryRunning.CompareAndSwap(false, true) {
		return // another goroutine is already running the retry loop
	}
	defer retryRunning.Store(false)

	logger := oclib.GetLogger()
	rawAddresses := strings.TrimSpace(conf.GetConfig().IndexerAddresses)
	if rawAddresses == "" {
		// No seeds configured: rely on the inbound-connection notifee to fill
		// the pool. Just wait patiently — the loop below will return as soon
		// as any peer connects and NudgeIt() is called.
		logger.Warn().Msg("[pool] pool empty and no seeds configured — waiting for inbound indexer")
	}
	backoff := 10 * time.Second
	const maxBackoff = 5 * time.Minute
	for {
		time.Sleep(backoff)
		if backoff < maxBackoff {
			backoff *= 2
		}
		// Check whether someone else already refilled the pool.
		if len(Indexers.GetAddrs()) > 0 {
			logger.Info().Msg("[pool] pool refilled externally, stopping seed retry")
			return
		}
		logger.Warn().Dur("backoff", backoff).Msg("[pool] still isolated, retrying seeds")
		reconnectToSeeds()
		if len(Indexers.GetAddrs()) > 0 {
			Indexers.NudgeIt()
			// Re-bootstrap the DHT now that we have at least one connection candidate.
			if discoveryDHT != nil {
				ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
				discoveryDHT.Bootstrap(ctx) //nolint:errcheck
				cancel()
			}
			return
		}
	}
}

// ensureScore returns the Score for addr, creating it if absent.
func ensureScore(d *Directory, addr string) *Score {
	if !d.ExistsScore(addr) {
		d.SetScore(addr, &Score{
			FirstContacted: time.Now().UTC(),
			UptimeTracker:  &UptimeTracker{FirstSeen: time.Now().UTC()},
			nextChallenge:  rand.Intn(10) + 1,
		})
	}
	return d.GetScore(addr)
}

// evictPeer removes addr from the directory atomically and returns a snapshot of
// the remaining AddrInfos (for consensus voter selection).
func evictPeer(d *Directory, addr string, id pp.ID, proto protocol.ID) []pp.AddrInfo {
	d.Streams.Delete(proto, &id)
	d.DeleteAddr(addr)
	entries := d.GetAddrs()
	voters := make([]pp.AddrInfo, 0, len(entries))
	for _, ai := range entries {
		if ai.Info != nil {
			voters = append(voters, *ai.Info)
		}
	}
	d.DeleteScore(addr)
	return voters
}

// handleSuggestions adds unknown suggested indexers to the directory.
func handleSuggestions(d *Directory, from string, suggestions []pp.AddrInfo) {
	added := 0
	for _, sug := range suggestions {
		key := addrKey(sug)
		if !d.ExistsAddr(key) {
			cpy := sug
			d.SetAddr(key, &cpy)
			added++
		}
	}
	if added > 0 {
		logger := oclib.GetLogger()
		logger.Info().Int("added", added).Str("from", from).
			Msg("added suggested indexers from heartbeat response")
		d.NudgeIt()
	}
}

// SendHeartbeat starts a goroutine that sends periodic heartbeats to peers.
// recordFn, when provided, is called on each tick and its output is embedded in
// the heartbeat as a fresh signed PeerRecord so the receiving indexer can
// republish it to the DHT without an extra round-trip.
// Pass no recordFn (or nil) for indexer→indexer / native heartbeats.
func SendHeartbeat(ctx context.Context, proto protocol.ID, name string, h host.Host, directory *Directory, interval time.Duration, maxPool int, recordFn ...func() json.RawMessage) {
	logger := oclib.GetLogger()
	isIndexerHB := directory == Indexers
	var recFn func() json.RawMessage
	if len(recordFn) > 0 {
		recFn = recordFn[0]
	}
	go func() {
		logger.Info().Str("proto", string(proto)).Int("peers", len(directory.GetAddrs())).Msg("heartbeat started")
		t := time.NewTicker(interval)
		defer t.Stop()

		doTick := func() {
			addrs := directory.GetAddrsStr()
			need := maxPool - len(addrs)
			if need < 0 {
				need = 0
			}
			baseHB := Heartbeat{
				Name:           name,
				PeerID:         h.ID().String(),
				Timestamp:      time.Now().UTC().Unix(),
				IndexersBinded: addrs,
				Need:           need,
			}
			if recFn != nil {
				baseHB.Record = recFn()
			}
			// Piggyback SWIM membership events on every outgoing heartbeat batch.
			// All peers in the pool receive the same events this tick.
			if isIndexerHB {
				baseHB.MembershipEvents = NodeEventQueue.Drain(5)
			}
			// Determine the referent indexer: the highest-scored one receives Referent=true
			// so it stores us in its referencedNodes for distributed search.
			var referentAddr string
			if isIndexerHB {
				var bestScore float64 = -1
				for _, ai2 := range directory.GetAddrs() {
					if s := directory.GetScore(ai2.Addr); s != nil && s.Score > bestScore {
						bestScore = s.Score
						referentAddr = ai2.Addr
					}
				}
			}

			for _, ai := range directory.GetAddrs() {
				// Build a per-peer heartbeat copy so challenge injection is peer-specific.
				hb := baseHB
				if isIndexerHB && referentAddr != "" && ai.Addr == referentAddr {
					hb.Referent = true
				}
				// SWIM: signal suspicion so the peer can refute by incrementing its incarnation.
				if isIndexerHB {
					if score := directory.GetScore(ai.Addr); score != nil && !score.UptimeTracker.SuspectedAt.IsZero() {
						inc := score.UptimeTracker.LastKnownIncarnation
						hb.SuspectedIncarnation = &inc
					}
				}
				// Ensure an IndexerScore entry exists for this peer.
				var score *Score
				if isIndexerHB {
					score = ensureScore(directory, ai.Addr)

					// Inject a challenge batch if due (random 1-10 HBs between batches).
					score.hbCount++
					if score.hbCount >= score.nextChallenge {
						// Ground truth: the node's own PeerID — the indexer MUST have us.
						challenges := []string{h.ID().String()}
						// Add up to 2 more known peers (other indexers) for richer data.
						// Use the already-snapshotted entries to avoid re-locking.
						for _, ai2 := range directory.GetAddrs() {
							if ai2.Addr != ai.Addr && ai2.Info != nil {
								challenges = append(challenges, ai2.Info.ID.String())
								if len(challenges) >= 3 {
									break
								}
							}
						}
						hb.Challenges = challenges
						score.hbCount = 0
						score.nextChallenge = rand.Intn(10) + 1
						score.challengeTotal++ // count the own-PeerID challenge (ground truth)
						score.dhtBatchCounter++
						// DHT challenge every 5th batch: ask the indexer to retrieve our own DID.
						if score.dhtBatchCounter%5 == 0 {
							var selfDID string
							if len(baseHB.Record) > 0 {
								var partial struct {
									DID string `json:"did"`
								}
								if json.Unmarshal(baseHB.Record, &partial) == nil {
									selfDID = partial.DID
								}
							}
							if selfDID != "" {
								hb.ChallengeDID = selfDID
							}
						}
					}
				}

				resp, rtt, err := sendHeartbeat(ctx, h, proto, ai.Info, hb, directory.Streams, interval)
				if err != nil { // heartbeat failed
					HeartbeatFailure(h, proto, directory, ai.Addr, ai.Info, isIndexerHB, maxPool, err)
					continue
				}

				// Update IndexerScore — uptime recorded on any successful send,
				// even if the indexer does not support bidirectional heartbeat (Fix 1).
				if isIndexerHB && score != nil {
					score.UptimeTracker.RecordHeartbeat()
					score.UptimeTracker.ConsecutiveFails = 0 // reset on success

					// SWIM: clear suspect state on any successful direct heartbeat.
					// The peer proved it is reachable; if it also incremented its incarnation
					// that is an explicit refutation — log it distinctly.
					if !score.UptimeTracker.SuspectedAt.IsZero() {
						wasExplicitRefutation := resp != nil &&
							resp.Incarnation > 0 &&
							resp.Incarnation > score.UptimeTracker.LastKnownIncarnation
						if wasExplicitRefutation {
							logger.Info().Str("peer", ai.Info.ID.String()).
								Uint64("old_incarnation", score.UptimeTracker.LastKnownIncarnation).
								Uint64("new_incarnation", resp.Incarnation).
								Msg("[swim] explicit refutation: incarnation incremented, suspicion cleared")
						} else {
							logger.Info().Str("peer", ai.Info.ID.String()).
								Msg("[swim] suspect cleared — peer responded to direct probe")
						}
						score.UptimeTracker.SuspectedAt = time.Time{}
						// Propagate an alive event so other nodes can clear their own suspect state.
						inc := score.UptimeTracker.LastKnownIncarnation
						if resp != nil && resp.Incarnation > 0 {
							inc = resp.Incarnation
						}
						NodeEventQueue.Add(MemberEvent{
							Type:        MemberAlive,
							PeerID:      ai.Info.ID.String(),
							Incarnation: inc,
							HopsLeft:    InitialEventHops,
						})
					}
					// Always update the last known incarnation.
					if resp != nil && resp.Incarnation > score.UptimeTracker.LastKnownIncarnation {
						score.UptimeTracker.LastKnownIncarnation = resp.Incarnation
					}

					maxRTT := BaseRoundTrip * 10
					latencyScore := 1.0 - float64(rtt)/float64(maxRTT)
					if latencyScore < 0 {
						latencyScore = 0
					}
					if latencyScore > 1 {
						latencyScore = 1
					}

					// Update fill / challenge fields only when the indexer responded.
					if resp != nil {
						// BornAt stability check.
						if score.LastBornAt.IsZero() {
							score.LastBornAt = resp.BornAt
						} else if !resp.BornAt.IsZero() && !resp.BornAt.Equal(score.LastBornAt) {
							score.bornAtChanges++
							score.LastBornAt = resp.BornAt
							logger.Warn().Str("peer", ai.Info.ID.String()).
								Int("changes", score.bornAtChanges).
								Msg("indexer BornAt changed — possible restart or impersonation")
						}
						score.LastFillRate = resp.FillRate

						// Fill rate consistency: cross-check peerCount/maxNodes vs the reported fillRate.
						if resp.MaxNodes > 0 {
							expected := float64(resp.PeerCount) / float64(resp.MaxNodes)
							diff := expected - resp.FillRate
							if diff < 0 {
								diff = -diff
							}
							score.fillChecked++
							if diff < 0.1 {
								score.fillConsistent++
							}
						}

						// Validate challenge responses. Only the own-PeerID entry counts as ground truth.
						if len(hb.Challenges) > 0 && len(resp.Challenges) > 0 {
							ownID := h.ID().String()
							for _, ce := range resp.Challenges {
								if ce.PeerID != ownID {
									continue // informational only
								}
								recentEnough := !ce.LastSeen.IsZero() &&
									time.Since(ce.LastSeen) < 2*RecommendedHeartbeatInterval
								if ce.Found && recentEnough {
									score.challengeCorrect++
								}
								logger.Info().Str("peer", ai.Info.ID.String()).
									Bool("found", ce.Found).
									Bool("recent", recentEnough).
									Msg("own-PeerID challenge result")
								break
							}
						}

						// DHT challenge result.
						if hb.ChallengeDID != "" {
							score.dhtChecked++
							if resp.DHTFound {
								score.dhtSuccess++
							}
						}

						// Launch the witness cross-check asynchronously (must not hold a lock).
						if len(resp.Witnesses) > 0 {
							go queryWitnesses(h, ai.Info.ID.String(), resp.BornAt, resp.FillRate, resp.Witnesses, score)
						} else if resp.MaxNodes > 0 {
							// No witnesses offered. Valid if the indexer only has us (PeerCount==1).
							// Cross-check: FillRate should equal 1/MaxNodes within ±10%.
							expected := 1.0 / float64(resp.MaxNodes)
							diff := resp.FillRate - expected
							if diff < 0 {
								diff = -diff
							}
							score.witnessChecked++
							if resp.PeerCount == 1 && diff < 0.1 {
								score.witnessConsistent++
							}
						}

						// SWIM infection: process membership events piggybacked on this response.
						// Events with HopsLeft > 0 are re-queued for forwarding to other indexers.
						for _, ev := range resp.MembershipEvents {
							if ev.HopsLeft > 0 {
								NodeEventQueue.Add(ev)
							}
							applyMemberEvent(ev, directory)
						}
					}

					score.Score = score.ComputeNodeSideScore(latencyScore)
					age := score.UptimeTracker.Uptime()
					minScore := dynamicMinScore(age)
					// Fix 4: grace period — at least 2 full heartbeat cycles before ejecting.
					isSeed := score.IsSeed
					// Seeds are sticky: never evicted by score alone (SuggestMigrate handles it).
					// Never eject the last indexer by score alone — we would lose all connectivity.
					belowThreshold := score.Score < minScore &&
						score.UptimeTracker.TotalOnline >= 2*RecommendedHeartbeatInterval &&
						!isSeed &&
						len(directory.GetAddrs()) > 1

					if belowThreshold {
						logger.Info().Str("peer", ai.Info.ID.String()).
							Float64("score", score.Score).Float64("min", minScore).
							Msg("indexer score below threshold, removing from pool")
						voters := evictPeer(directory, ai.Addr, ai.Info.ID, proto)
						need := max(maxPool-len(voters), 1)
						if len(voters) > 0 {
							go TriggerConsensus(h, voters, need)
						} else {
							go replenishIndexersFromDHT(h, need)
						}
					}

					// Accept suggestions from this indexer — add unknown ones to the directory.
					if resp != nil && len(resp.Suggestions) > 0 {
						handleSuggestions(directory, ai.Info.ID.String(), resp.Suggestions)
					}

					// Handle SuggestMigrate: the indexer is overloaded and wants us to move.
					if resp != nil && resp.SuggestMigrate {
						nonSeedCount := 0
						for _, sc := range directory.GetScores() {
							if !sc.IsSeed {
								nonSeedCount++
							}
						}
						if nonSeedCount >= conf.GetConfig().MinIndexer {
							if isSeed {
								// The seed has offloaded us: clear the sticky flag, score eviction takes over.
								score.IsSeed = false
								logger.Info().Str("peer", ai.Info.ID.String()).
									Msg("seed discharged via SuggestMigrate, de-stickied")
							} else {
								evictPeer(directory, ai.Addr, ai.Info.ID, proto)
								logger.Info().Str("peer", ai.Info.ID.String()).Msg("accepted migration from overloaded indexer")
							}
						}
					}
				}
			}
		}

		for {
			select {
			case <-t.C:
				doTick()
			case <-directory.Nudge:
				if isIndexerHB {
					logger.Info().Msg("nudge received, heartbeating new indexers immediately")
					doTick()
				}
			case <-ctx.Done():
				return
			}
		}
	}()
}
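// dynamicMinScore is called above but defined outside this diff. A plausible
// sketch — an assumption for illustration only, not the actual implementation —
// is a threshold that ramps up with the age of the relationship, giving young
// indexers a grace period before full strictness applies:
//
//	func dynamicMinScore(age time.Duration) float64 {
//		const steadyStateMin = 50.0     // assumed long-run minimum score
//		const rampUp = 10 * time.Minute // assumed time to full strictness
//		if age >= rampUp {
//			return steadyStateMin
//		}
//		return steadyStateMin * float64(age) / float64(rampUp)
//	}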
|
||||
|
||||
// runIndirectProbe asks up to k live indexers (voters) to probe target via
|
||||
// ProtocolBandwidthProbe and returns true if the majority report reachable.
|
||||
// This is the SWIM explicit indirect ping — called only on heartbeat failure.
|
||||
func runIndirectProbe(h host.Host, target pp.AddrInfo, voters []Entry, k int) bool {
|
||||
if k > len(voters) {
|
||||
k = len(voters)
|
||||
}
|
||||
if k == 0 {
|
||||
return false
|
||||
}
|
||||
shuffled := make([]Entry, len(voters))
|
||||
copy(shuffled, voters)
|
||||
rand.Shuffle(len(shuffled), func(i, j int) { shuffled[i], shuffled[j] = shuffled[j], shuffled[i] })
|
||||
shuffled = shuffled[:k]
|
||||
|
||||
type result struct{ reachable bool }
|
||||
ch := make(chan result, k)
|
||||
for _, voter := range shuffled {
|
||||
if voter.Info == nil {
|
||||
ch <- result{false}
|
||||
continue
|
||||
}
|
||||
go func(v pp.AddrInfo) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second)
|
||||
defer cancel()
|
||||
s, err := h.NewStream(ctx, v.ID, ProtocolIndirectProbe)
|
||||
if err != nil {
|
||||
ch <- result{false}
|
||||
return
|
||||
}
|
||||
s.SetDeadline(time.Now().Add(8 * time.Second))
|
||||
defer s.Close()
|
||||
if err := json.NewEncoder(s).Encode(IndirectProbeRequest{Target: target}); err != nil {
|
||||
ch <- result{false}
|
||||
return
|
||||
}
|
||||
var resp IndirectProbeResponse
|
||||
if err := json.NewDecoder(s).Decode(&resp); err != nil {
|
||||
ch <- result{false}
|
||||
return
|
||||
}
|
||||
ch <- result{resp.Reachable}
|
||||
}(*voter.Info)
|
||||
}
|
||||
reachable := 0
|
||||
for range k {
|
||||
if (<-ch).reachable {
|
||||
reachable++
|
||||
}
|
||||
}
|
||||
return reachable > k/2
|
||||
}
|
||||
|
||||
func HeartbeatFailure(h host.Host, proto protocol.ID, directory *Directory,
|
||||
addr string, info *pp.AddrInfo, isIndexerHB bool, maxPool int, err error) {
|
||||
logger := oclib.GetLogger()
|
||||
logger.Err(err)
|
||||
// Seeds are never evicted on heartbeat failure.
|
||||
// Keeping them in the pool lets the regular 60-second ticker retry them
|
||||
// at a natural cadence — no reconnect storm, no libp2p dial-backoff accumulation.
|
||||
// A seed will self-heal once it comes back; DHT and inbound peers fill the gap.
|
||||
if isIndexerHB {
|
||||
if score := directory.GetScore(addr); score != nil {
|
||||
if score.IsSeed {
|
||||
logger.Warn().Str("peer", info.ID.String()).
|
||||
Msg("[pool] seed heartbeat failed — keeping in pool, ticker will retry " + err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
voters := directory.GetAddrs()
|
||||
if len(voters) <= 1 {
|
||||
// Last indexer: no peer available to proxy a probe.
|
||||
// Enter suspect state on first failure; evict only after suspectTimeout.
|
||||
if score.UptimeTracker.SuspectedAt.IsZero() {
|
||||
score.UptimeTracker.SuspectedAt = time.Now().UTC()
|
||||
score.UptimeTracker.ConsecutiveFails++
|
||||
NodeEventQueue.Add(MemberEvent{
|
||||
Type: MemberSuspect,
|
||||
PeerID: info.ID.String(),
|
||||
Incarnation: score.UptimeTracker.LastKnownIncarnation,
|
||||
HopsLeft: InitialEventHops,
|
||||
			})
			logger.Warn().Str("peer", info.ID.String()).
				Msg("[swim] last indexer suspect — waiting for refutation or timeout")
			return
		}
		if time.Since(score.UptimeTracker.SuspectedAt) < suspectTimeout {
			logger.Warn().Str("peer", info.ID.String()).
				Dur("suspected_for", time.Since(score.UptimeTracker.SuspectedAt)).
				Msg("[swim] last indexer still failing, holding in suspect state")
			return
		}
		// suspectTimeout exceeded with no refutation — declare dead.
		logger.Warn().Str("peer", info.ID.String()).
			Msg("[swim] last indexer suspect timeout exceeded, evicting")
		NodeEventQueue.Add(MemberEvent{
			Type:        MemberDead,
			PeerID:      info.ID.String(),
			Incarnation: score.UptimeTracker.LastKnownIncarnation,
			HopsLeft:    InitialEventHops,
		})
	} else if score.UptimeTracker.SuspectedAt.IsZero() {
		// First miss with other live indexers available:
		// enter suspect state and run an indirect probe asynchronously.
		score.UptimeTracker.SuspectedAt = time.Now().UTC()
		score.UptimeTracker.ConsecutiveFails++
		NodeEventQueue.Add(MemberEvent{
			Type:        MemberSuspect,
			PeerID:      info.ID.String(),
			Incarnation: score.UptimeTracker.LastKnownIncarnation,
			HopsLeft:    InitialEventHops,
		})
		probeTarget := *info
		go func() {
			alive := runIndirectProbe(h, probeTarget, voters, 2)
			if alive {
				// Other indexers confirm the target is reachable → our direct
				// link may be temporarily broken. Keep suspected; the next
				// heartbeat tick will retry the direct probe.
				logger.Warn().Str("peer", probeTarget.ID.String()).
					Msg("[swim] indirect probe: target reachable by peers, keeping (suspected)")
			} else {
				// Majority of probes also failed → the indexer is genuinely dead.
				logger.Warn().Str("peer", probeTarget.ID.String()).
					Msg("[swim] indirect probe: target unreachable, evicting")
				NodeEventQueue.Add(MemberEvent{
					Type:        MemberDead,
					PeerID:      probeTarget.ID.String(),
					Incarnation: score.UptimeTracker.LastKnownIncarnation,
					HopsLeft:    InitialEventHops,
				})
				consensusVoters := evictPeer(directory, addr, probeTarget.ID, proto)
				need := max(maxPool-len(consensusVoters), 1)
				if len(consensusVoters) > 0 {
					TriggerConsensus(h, consensusVoters, need)
				} else {
					replenishIndexersFromDHT(h, need)
				}
			}
		}()
		return // decision deferred to probe goroutine
	} else if time.Since(score.UptimeTracker.SuspectedAt) < suspectTimeout {
		// Still within suspect window — the next tick's SuspectedIncarnation
		// in the heartbeat may trigger a refutation. Keep retrying.
		logger.Warn().Str("peer", info.ID.String()).
			Dur("suspected_for", time.Since(score.UptimeTracker.SuspectedAt)).
			Msg("[swim] suspected peer still failing, waiting for refutation or timeout")
		return
	} else {
		// suspectTimeout exceeded — declare dead and fall through to eviction.
		logger.Warn().Str("peer", info.ID.String()).
			Msg("[swim] suspect timeout exceeded, evicting")
		NodeEventQueue.Add(MemberEvent{
			Type:        MemberDead,
			PeerID:      info.ID.String(),
			Incarnation: score.UptimeTracker.LastKnownIncarnation,
			HopsLeft:    InitialEventHops,
		})
	}
	}
	}

	logger.Info().Str("peer", info.ID.String()).Str("proto", string(proto)).
Msg("heartbeat failed, removing peer from pool : " + err.Error())
	consensusVoters := evictPeer(directory, addr, info.ID, proto)
	if isIndexerHB {
		need := maxPool - len(consensusVoters)
		if need < 1 {
			need = 1
		}
		logger.Info().Int("remaining", len(consensusVoters)).Int("need", need).Msg("pool state after removal")
		poolSize := len(directory.GetAddrs())
		if poolSize == 0 {
			// Pool is truly empty (no seeds configured or no seeds in pool).
			// Start the backoff retry loop — it will re-add seeds and nudge
			// only once a seed actually responds.
			go retryUntilSeedResponds()
		} else if len(consensusVoters) > 0 {
			go TriggerConsensus(h, consensusVoters, need)
		} else {
			go replenishIndexersFromDHT(h, need)
		}
	}
}

// applyMemberEvent applies an incoming SWIM membership event to the local directory.
// Only MemberAlive events with a higher incarnation can clear an existing suspect state;
// MemberSuspect / MemberDead from gossip are informational — we do not act on them
// unilaterally since the node has its own direct-probe evidence.
func applyMemberEvent(ev MemberEvent, directory *Directory) {
	if ev.Type != MemberAlive {
		return
	}
	logger := oclib.GetLogger()
	for _, ai := range directory.GetAddrs() {
		if ai.Info == nil || ai.Info.ID.String() != ev.PeerID {
			continue
		}
		score := directory.GetScore(ai.Addr)
		if score == nil || score.UptimeTracker == nil {
			return
		}
		if ev.Incarnation > score.UptimeTracker.LastKnownIncarnation {
			score.UptimeTracker.LastKnownIncarnation = ev.Incarnation
			if !score.UptimeTracker.SuspectedAt.IsZero() {
				score.UptimeTracker.SuspectedAt = time.Time{}
				score.UptimeTracker.ConsecutiveFails = 0
				logger.Info().Str("peer", ev.PeerID).
					Uint64("incarnation", ev.Incarnation).
					Msg("[swim] alive event via gossip cleared suspicion")
			}
		}
		return
	}
}
@@ -19,7 +19,8 @@ type Event struct {
	Type string `json:"type"`
	From string `json:"from"` // peerID

	User string
	User   string
	Groups []string

	DataType  int64 `json:"datatype"`
	Timestamp int64 `json:"ts"`
@@ -28,7 +29,7 @@ type Event struct {
}

func NewEvent(name string, from string, dt *tools.DataType, user string, payload []byte) *Event {
	priv, err := LoadKeyFromFilePrivate() // your node private key
	priv, err := tools.LoadKeyFromFilePrivate() // your node private key
	if err != nil {
		return nil
	}
@@ -73,7 +74,11 @@ func (event *Event) Verify(p *peer.Peer) error {
	if p.Relation == peer.BLACKLIST { // if peer is blacklisted... quit...
		return errors.New("peer is blacklisted")
	}
	pubKey, err := PubKeyFromString(p.PublicKey) // extract pubkey from pubkey str
	return event.VerifySignature(p.PublicKey)
}

func (event *Event) VerifySignature(pk string) error {
	pubKey, err := PubKeyFromString(pk) // extract pubkey from pubkey str
	if err != nil {
		return errors.New("pubkey is malformed")
	}
@@ -88,11 +93,11 @@ func (event *Event) Verify(p *peer.Peer) error {
}

type TopicNodeActivityPub struct {
	NodeActivity peer.PeerState
	Disposer     pp.AddrInfo `json:"disposer_address"`
	Name         string      `json:"name"`
	DID          string      `json:"did"` // real PEER ID
	PeerID       string      `json:"peer_id"`
	NodeActivity int    `json:"node_activity"`
	Disposer     string `json:"disposer_address"`
	Name         string `json:"name"`
	DID          string `json:"did"` // real PEER ID
	PeerID       string `json:"peer_id"`
}

type LongLivedPubSubService struct {
@@ -108,6 +113,12 @@ func NewLongLivedPubSubService(h host.Host) *LongLivedPubSubService {
	}
}

func (s *LongLivedPubSubService) GetPubSub(topicName string) *pubsub.Topic {
	s.PubsubMu.Lock()
	defer s.PubsubMu.Unlock()
	return s.LongLivedPubSubs[topicName]
}

func (s *LongLivedPubSubService) processEvent(
	ctx context.Context,
	p *peer.Peer,
@@ -119,26 +130,8 @@ func (s *LongLivedPubSubService) processEvent(
	return handler(ctx, topicName, event)
}

const TopicPubSubNodeActivity = "oc-node-activity"
const TopicPubSubSearch = "oc-node-search"

func (s *LongLivedPubSubService) SubscribeToNodeActivity(ps *pubsub.PubSub, f *func(context.Context, TopicNodeActivityPub, string)) error {
	ps.RegisterTopicValidator(TopicPubSubNodeActivity, func(ctx context.Context, p pp.ID, m *pubsub.Message) bool {
		return true
	})
	if topic, err := ps.Join(TopicPubSubNodeActivity); err != nil {
		return err
	} else {
		s.PubsubMu.Lock()
		defer s.PubsubMu.Unlock()
		s.LongLivedPubSubs[TopicPubSubNodeActivity] = topic
	}
	if f != nil {
		return SubscribeEvents(s, context.Background(), TopicPubSubNodeActivity, -1, *f)
	}
	return nil
}

func (s *LongLivedPubSubService) SubscribeToSearch(ps *pubsub.PubSub, f *func(context.Context, Event, string)) error {
	ps.RegisterTopicValidator(TopicPubSubSearch, func(ctx context.Context, p pp.ID, m *pubsub.Message) bool {
		return true
@@ -147,55 +140,72 @@ func (s *LongLivedPubSubService) SubscribeToSearch(ps *pubsub.PubSub, f *func(co
		return err
	} else {
		s.PubsubMu.Lock()
		defer s.PubsubMu.Unlock()
		s.LongLivedPubSubs[TopicPubSubSearch] = topic
		s.PubsubMu.Unlock()
	}
	if f != nil {
		return SubscribeEvents(s, context.Background(), TopicPubSubSearch, -1, *f)
	}
	// Even when no handler is needed (e.g. strict indexers), we must call
	// topic.Subscribe() so that this peer sends a SUBSCRIBE control message
	// to connected peers and joins the GossipSub mesh as a forwarder.
	// Without this, messages cannot be relayed through indexers between nodes.
	topic := s.LongLivedPubSubs[TopicPubSubSearch]
	sub, err := topic.Subscribe()
	if err != nil {
		return err
	}
	go func() {
		for {
			if _, err := sub.Next(context.Background()); err != nil {
				return
			}
		}
	}()
	return nil
}

func SubscribeEvents[T interface{}](s *LongLivedPubSubService,
	ctx context.Context, proto string, timeout int, f func(context.Context, T, string),
) error {
	s.PubsubMu.Lock()
	if s.LongLivedPubSubs[proto] == nil {
		s.PubsubMu.Unlock()
		return errors.New("no protocol subscribed in pubsub")
	}
	topic := s.LongLivedPubSubs[proto]
	s.PubsubMu.Unlock()

	sub, err := topic.Subscribe() // then subscribe to it
	if err != nil {
		return err
	}
	// launch loop waiting for results.
	go waitResults[T](s, ctx, sub, proto, timeout, f)
	go waitResults(topic, s, ctx, sub, proto, timeout, f)

	return nil
}

func waitResults[T interface{}](s *LongLivedPubSubService, ctx context.Context, sub *pubsub.Subscription, proto string, timeout int, f func(context.Context, T, string)) {
func waitResults[T interface{}](topic *pubsub.Topic, s *LongLivedPubSubService, ctx context.Context, sub *pubsub.Subscription, proto string, timeout int, f func(context.Context, T, string)) {
	defer ctx.Done()
	for {
		s.PubsubMu.Lock() // safely check that the cache still holds a subscription for this topic
		if s.LongLivedPubSubs[proto] == nil { // if not, kill the loop.
			break
			s.LongLivedPubSubs[proto] = topic
		}
		s.PubsubMu.Unlock()

		// if still subscribed -> wait for new message
		var cancel context.CancelFunc
		if timeout != -1 {
			ctx, cancel = context.WithTimeout(ctx, time.Duration(timeout)*time.Second)
			defer cancel()
		}

		msg, err := sub.Next(ctx)
		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) {
				// timeout hit: no message before the deadline, kill the subscription.
				s.PubsubMu.Lock()
				if s.LongLivedPubSubs[proto] != nil {
					s.LongLivedPubSubs[proto].Close()
				}
				delete(s.LongLivedPubSubs, proto)
				s.PubsubMu.Unlock()
				return
@@ -207,10 +217,5 @@ func waitResults[T interface{}](s *LongLivedPubSubService, ctx context.Context,
			continue
		}
		f(ctx, evt, fmt.Sprintf("%v", proto))
		/*if p, err := ps.Node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 {
			if err := ps.processEvent(ctx, p[0], &evt, topicName); err != nil {
				logger.Err(err)
			}
		}*/
	}
}

188
daemons/node/common/common_scoring.go
Normal file
188
daemons/node/common/common_scoring.go
Normal file
@@ -0,0 +1,188 @@
package common

import (
	"context"
	cr "crypto/rand"
	"io"
	"net"
	"slices"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	pp "github.com/libp2p/go-libp2p/core/peer"
)

const MaxExpectedMbps = 100.0
const MinPayloadChallenge = 512
const MaxPayloadChallenge = 2048
const BaseRoundTrip = 400 * time.Millisecond

type UptimeTracker struct {
	FirstSeen        time.Time
	LastSeen         time.Time
	TotalOnline      time.Duration
	ConsecutiveFails int       // kept for compatibility / logging; primary eviction uses SuspectedAt
	SuspectedAt      time.Time // SWIM: non-zero when this peer is in suspect state
	// LastKnownIncarnation is the last incarnation number received from this peer.
	// When a peer sees itself suspected (SuspectedIncarnation in heartbeat) it
	// increments its incarnation and the node clears the suspect state on receipt.
	LastKnownIncarnation uint64
}

// RecordHeartbeat accumulates online time gap-aware: only counts the interval if
// the gap since the last heartbeat is within 2× the recommended interval (i.e. no
// extended outage). Call this each time a heartbeat is successfully processed.
func (u *UptimeTracker) RecordHeartbeat() {
	now := time.Now().UTC()
	if !u.LastSeen.IsZero() {
		gap := now.Sub(u.LastSeen)
		if gap <= 2*RecommendedHeartbeatInterval {
			u.TotalOnline += gap
		}
	}
	u.LastSeen = now
}

func (u *UptimeTracker) Uptime() time.Duration {
	return time.Since(u.FirstSeen)
}

// UptimeRatio returns the fraction of tracked lifetime during which the peer was
// continuously online (gap ≤ 2×RecommendedHeartbeatInterval). Returns 0 before
// the first heartbeat interval has elapsed.
func (u *UptimeTracker) UptimeRatio() float64 {
	total := time.Since(u.FirstSeen)
	if total <= 0 {
		return 0
	}
	ratio := float64(u.TotalOnline) / float64(total)
	if ratio > 1 {
		ratio = 1
	}
	return ratio
}

func (u *UptimeTracker) IsEligible(min time.Duration) bool {
	return u.Uptime() >= min
}
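
A quick sketch of how the gap-aware accumulation behaves (illustrative only; the concrete value of RecommendedHeartbeatInterval is defined elsewhere in this package and assumed here to be ~20s):

	u := &UptimeTracker{FirstSeen: time.Now().UTC()}
	u.RecordHeartbeat() // sets LastSeen; nothing accrued yet
	// 20s later (gap ≤ 2×interval) → the whole gap is counted:
	u.RecordHeartbeat() // TotalOnline += 20s
	// 2min later (gap > 2×interval) → treated as an outage, nothing counted:
	u.RecordHeartbeat() // TotalOnline unchanged, LastSeen refreshed
	// after ~140s of tracked lifetime: UptimeRatio() ≈ 20s / 140s ≈ 0.14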

// getBandwidthChallengeRate opens a dedicated ProtocolBandwidthProbe stream to
// remotePeer, sends a random payload, reads the echo, and computes throughput
// and a latency score. Returns (ok, bpms, latencyScore, error).
// latencyScore is 1.0 when RTT is very fast and 0.0 when at or beyond maxRoundTrip.
// Using a separate stream avoids mixing binary data on the JSON heartbeat stream
// and ensures the echo handler is actually running on the remote side.
func getBandwidthChallengeRate(h host.Host, remotePeer pp.ID, payloadSize int) (bool, float64, float64, error) {
	payload := make([]byte, payloadSize)
	if _, err := cr.Read(payload); err != nil {
		return false, 0, 0, err
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	s, err := h.NewStream(ctx, remotePeer, ProtocolBandwidthProbe)
	if err != nil {
		return false, 0, 0, err
	}
	defer s.Reset()
	s.SetDeadline(time.Now().Add(10 * time.Second))
	start := time.Now()
	if _, err = s.Write(payload); err != nil {
		return false, 0, 0, err
	}
	// Half-close the write side so the handler's io.Copy sees EOF and stops.
	s.CloseWrite()
	// Read the echo.
	response := make([]byte, payloadSize)
	if _, err = io.ReadFull(s, response); err != nil {
		return false, 0, 0, err
	}

	duration := time.Since(start)
	maxRoundTrip := BaseRoundTrip + (time.Duration(payloadSize) * (100 * time.Millisecond))
	mbps := float64(payloadSize*8) / duration.Seconds() / 1e6

	// latencyScore: 1.0 = instant, 0.0 = at maxRoundTrip or beyond.
	latencyScore := 1.0 - float64(duration)/float64(maxRoundTrip)
	if latencyScore < 0 {
		latencyScore = 0
	}
	if latencyScore > 1 {
		latencyScore = 1
	}

	if duration > maxRoundTrip || mbps < 5.0 {
		return false, float64(mbps / MaxExpectedMbps), latencyScore, nil
	}
	return true, float64(mbps / MaxExpectedMbps), latencyScore, nil
}
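
The comments above assume every peer registers an echo handler on ProtocolBandwidthProbe; that handler is not part of this hunk. A minimal sketch of what it could look like:

	// Sketch (assumed, not shown in this diff): echo every byte back until
	// the prober's CloseWrite() delivers EOF, then close our side too.
	h.SetStreamHandler(ProtocolBandwidthProbe, func(s network.Stream) {
		defer s.Close()
		s.SetDeadline(time.Now().Add(15 * time.Second)) // don't hold probe streams forever
		io.Copy(s, s)                                   // read from the stream, write straight back
	})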

func getDiversityRate(h host.Host, peers []string) float64 {
	peers, _ = checkPeers(h, peers)
	diverse := []string{}
	for _, p := range peers {
		ip, err := ExtractIP(p)
		if err != nil {
			continue
		}
		div := ip.Mask(net.CIDRMask(24, 32)).String()
		if !slices.Contains(diverse, div) {
			diverse = append(diverse, div)
		}
	}
	if len(diverse) == 0 || len(peers) == 0 {
		return 1
	}
	return float64(len(diverse)) / float64(len(peers))
}

// getOwnDiversityRate measures subnet /24 diversity of the indexer's own connected peers.
// This evaluates the indexer's network position rather than the connecting node's topology.
func getOwnDiversityRate(h host.Host) float64 {
	diverse := map[string]struct{}{}
	total := 0
	for _, pid := range h.Network().Peers() {
		for _, maddr := range h.Peerstore().Addrs(pid) {
			total++
			ip, err := ExtractIP(maddr.String())
			if err != nil {
				continue
			}
			diverse[ip.Mask(net.CIDRMask(24, 32)).String()] = struct{}{}
		}
	}
	if total == 0 {
		return 1
	}
	return float64(len(diverse)) / float64(total)
}
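
Both functions bucket addresses by their /24 prefix, so a swarm of peers on one subnet counts as a single point of diversity. A standalone check of the masking:

	ip := net.ParseIP("203.0.113.42")
	fmt.Println(ip.Mask(net.CIDRMask(24, 32))) // "203.0.113.0" — 203.0.113.7 lands in the
	// same bucket, so ten Sybils on one subnet contribute a diversity of only 1/10.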

func checkPeers(h host.Host, peers []string) ([]string, []string) {
	concretePeer := []string{}
	ips := []string{}
	for _, p := range peers {
		ad, err := pp.AddrInfoFromString(p)
		if err != nil {
			continue
		}
		if PeerIsAlive(h, *ad) {
			concretePeer = append(concretePeer, p)
			if ip, err := ExtractIP(p); err == nil {
				ips = append(ips, ip.Mask(net.CIDRMask(24, 32)).String())
			}
		}
	}
	return concretePeer, ips
}

// dynamicMinScore returns the minimum acceptable score for a peer, starting
// permissive (20%) for brand-new peers and hardening linearly to 80% over 24h.
// This prevents ejecting newcomers in fresh networks while filtering parasites.
func dynamicMinScore(age time.Duration) float64 {
	hours := age.Hours()
	score := 20.0 + 60.0*(hours/24.0)
	if score > 80.0 {
		score = 80.0
	}
	return score
}
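
Worked values for the ramp: score = 20 + 60·(hours/24), capped at 80.

	fmt.Println(dynamicMinScore(0))              // 20 — brand-new peer
	fmt.Println(dynamicMinScore(12 * time.Hour)) // 50 — halfway up the ramp
	fmt.Println(dynamicMinScore(48 * time.Hour)) // 80 — capped after 24h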
358
daemons/node/common/common_service.go
Normal file
358
daemons/node/common/common_service.go
Normal file
@@ -0,0 +1,358 @@
package common

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"oc-discovery/conf"
	"strings"
	"sync"
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	pp "github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

type LongLivedStreamRecordedService[T interface{}] struct {
	*LongLivedPubSubService
	StreamRecords map[protocol.ID]map[pp.ID]*StreamRecord[T]
	StreamMU      sync.RWMutex
	maxNodesConn  int
	ConnGuard     *ConnectionRateGuard

	// AllowInbound, when set, is called once at stream open before any heartbeat
	// is decoded. remotePeer is the connecting peer; isNew is true when no
	// StreamRecord exists yet (first-ever connection). Return a non-nil error
	// to immediately reset the stream and refuse the peer.
	AllowInbound func(remotePeer pp.ID, isNew bool) error
	// ValidateHeartbeat, when set, is called inside the heartbeat loop after
	// each successful CheckHeartbeat decode. Return a non-nil error to reset
	// the stream and terminate the session.
	ValidateHeartbeat func(remotePeer pp.ID) error
	// AfterHeartbeat is called after each successful heartbeat with the full
	// decoded Heartbeat so the hook can use the fresh embedded PeerRecord.
	AfterHeartbeat func(hb *Heartbeat)
	// AfterDelete is called after gc() evicts an expired peer, outside the lock.
	// name and did may be empty if the HeartbeatStream had no metadata.
	AfterDelete func(pid pp.ID, name string, did string)
	// BuildHeartbeatResponse, when set, is called after each successfully decoded
	// heartbeat to build the response sent back to the node.
	// remotePeer is the connecting peer. hb is the full decoded heartbeat, including
	// SWIM fields (SuspectedIncarnation, MembershipEvents) and record/challenge data.
	BuildHeartbeatResponse func(remotePeer pp.ID, hb *Heartbeat) *HeartbeatResponse
}

func (ix *LongLivedStreamRecordedService[T]) MaxNodesConn() int {
	return ix.maxNodesConn
}

func NewStreamRecordedService[T interface{}](h host.Host, maxNodesConn int) *LongLivedStreamRecordedService[T] {
	service := &LongLivedStreamRecordedService[T]{
		LongLivedPubSubService: NewLongLivedPubSubService(h),
		StreamRecords:          map[protocol.ID]map[pp.ID]*StreamRecord[T]{},
		maxNodesConn:           maxNodesConn,
		ConnGuard:              newConnectionRateGuard(),
	}
	go service.StartGC(30 * time.Second)
	// Garbage collection is needed on every map of long-lived streams... this may need a top-level redesign.
	go service.Snapshot(1 * time.Hour)
	return service
}

func (ix *LongLivedStreamRecordedService[T]) StartGC(interval time.Duration) {
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for range t.C {
			fmt.Println("ACTUALLY RELATED INDEXERS", Indexers.Addrs, len(Indexers.Addrs))
			ix.gc()
		}
	}()
}

func (ix *LongLivedStreamRecordedService[T]) gc() {
	ix.StreamMU.Lock()
	now := time.Now().UTC()
	if ix.StreamRecords[ProtocolHeartbeat] == nil {
		ix.StreamRecords[ProtocolHeartbeat] = map[pp.ID]*StreamRecord[T]{}
		ix.StreamMU.Unlock()
		return
	}
	streams := ix.StreamRecords[ProtocolHeartbeat]

	type gcEntry struct {
		pid  pp.ID
		name string
		did  string
	}
	var evicted []gcEntry
	for pid, rec := range streams {
		if now.After(rec.HeartbeatStream.Expiry) || now.Sub(rec.HeartbeatStream.UptimeTracker.LastSeen) > 2*rec.HeartbeatStream.Expiry.Sub(now) {
			name, did := "", ""
			if rec.HeartbeatStream != nil {
				name = rec.HeartbeatStream.Name
				did = rec.HeartbeatStream.DID
			}
			evicted = append(evicted, gcEntry{pid, name, did})
			for _, sstreams := range ix.StreamRecords {
				if sstreams[pid] != nil {
					if sstreams[pid].HeartbeatStream != nil && sstreams[pid].HeartbeatStream.Stream != nil {
						sstreams[pid].HeartbeatStream.Stream.Close()
					}

					delete(sstreams, pid)
				}
			}
		}
	}
	ix.StreamMU.Unlock()

	if ix.AfterDelete != nil {
		for _, e := range evicted {
			ix.AfterDelete(e.pid, e.name, e.did)
		}
	}
}

func (ix *LongLivedStreamRecordedService[T]) Snapshot(interval time.Duration) {
	go func() {
		logger := oclib.GetLogger()
		t := time.NewTicker(interval)
		defer t.Stop()
		for range t.C {
			infos := ix.snapshot()
			for _, inf := range infos {
				logger.Info().Msg(" -> " + inf.DID)
			}
		}
	}()
}

// -------- Snapshot / Query --------
func (ix *LongLivedStreamRecordedService[T]) snapshot() []*StreamRecord[T] {
	ix.StreamMU.Lock()
	defer ix.StreamMU.Unlock()

	out := make([]*StreamRecord[T], 0, len(ix.StreamRecords))
	for _, streams := range ix.StreamRecords {
		for _, stream := range streams {
			out = append(out, stream)
		}
	}
	return out
}

func (ix *LongLivedStreamRecordedService[T]) HandleHeartbeat(s network.Stream) {
	logger := oclib.GetLogger()
	defer s.Close()

	// AllowInbound: burst guard + ban check before the first byte is read.
	if ix.AllowInbound != nil {
		remotePeer := s.Conn().RemotePeer()
		ix.StreamMU.RLock()
		_, exists := ix.StreamRecords[ProtocolHeartbeat][remotePeer]
		ix.StreamMU.RUnlock()
		if err := ix.AllowInbound(remotePeer, !exists); err != nil {
			logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("inbound connection refused")
			s.Reset()
			return
		}
	}

	dec := json.NewDecoder(s)
	for {
		ix.StreamMU.Lock()
		if ix.StreamRecords[ProtocolHeartbeat] == nil {
			ix.StreamRecords[ProtocolHeartbeat] = map[pp.ID]*StreamRecord[T]{}
		}
		streams := ix.StreamRecords[ProtocolHeartbeat]
		streamsAnonym := map[pp.ID]HeartBeatStreamed{}
		for k, v := range streams {
			streamsAnonym[k] = v
		}
		ix.StreamMU.Unlock()
		pid, hb, err := CheckHeartbeat(ix.Host, s, dec, streamsAnonym, &ix.StreamMU, ix.maxNodesConn)
		if err != nil {
			// Stream-level errors (EOF, reset, closed) mean the connection is gone
			// — exit so the goroutine doesn't spin forever on a dead stream.
			// Metric/policy errors (score too low, too many connections) are transient
			// — those are also stream-terminal since the stream carries one session.
			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
				strings.Contains(err.Error(), "reset") ||
				strings.Contains(err.Error(), "closed") ||
				strings.Contains(err.Error(), "too many connections") {
				logger.Info().Err(err).Msg("heartbeat stream terminated, closing handler")
				return
			}
			logger.Warn().Err(err).Msg("heartbeat check failed, retrying on same stream")
			continue
		}
		// ValidateHeartbeat: per-tick behavioral check (rate limiting, bans).
		if ix.ValidateHeartbeat != nil {
			if err := ix.ValidateHeartbeat(*pid); err != nil {
				logger.Warn().Err(err).Str("peer", pid.String()).Msg("heartbeat rejected, closing stream")
				s.Reset()
				return
			}
		}
		ix.StreamMU.Lock()
		// if record already seen update last seen
		if rec, ok := streams[*pid]; ok {
			rec.DID = hb.DID
			// Preserve the existing UptimeTracker so TotalOnline accumulates correctly.
			// hb.Stream is a fresh Stream with no UptimeTracker; carry the old one over.
			oldTracker := rec.GetUptimeTracker()
			rec.HeartbeatStream = hb.Stream
			if oldTracker != nil {
				rec.HeartbeatStream.UptimeTracker = oldTracker
			} else {
				rec.HeartbeatStream.UptimeTracker = &UptimeTracker{FirstSeen: time.Now().UTC()}
			}
			rec.HeartbeatStream.UptimeTracker.RecordHeartbeat()
			rec.LastScore = hb.Score
logger.Info().Msg("A new node is updated : " + pid.String())
		} else {
			tracker := &UptimeTracker{FirstSeen: time.Now().UTC()}
			tracker.RecordHeartbeat()
			hb.Stream.UptimeTracker = tracker
			streams[*pid] = &StreamRecord[T]{
				DID:             hb.DID,
				HeartbeatStream: hb.Stream,
				LastScore:       hb.Score,
			}
logger.Info().Msg("A new node is subscribed : " + pid.String())
		}
		ix.StreamMU.Unlock()
		// Enrich hb.DID before calling the hook: nodes never set hb.DID directly;
		// extract it from the embedded signed PeerRecord if available, then fall
		// back to the DID stored by handleNodePublish in the stream record.
		if hb.DID == "" && len(hb.Record) > 0 {
			var partial struct {
				DID string `json:"did"`
			}
			if json.Unmarshal(hb.Record, &partial) == nil && partial.DID != "" {
				hb.DID = partial.DID
			}
		}
		if hb.DID == "" {
			ix.StreamMU.RLock()
			if rec, ok := streams[*pid]; ok {
				hb.DID = rec.DID
			}
			ix.StreamMU.RUnlock()
		}
		if ix.AfterHeartbeat != nil && hb.DID != "" {
			go ix.AfterHeartbeat(hb)
		}
		// Send response back to the node (bidirectional heartbeat).
		if ix.BuildHeartbeatResponse != nil {
			if resp := ix.BuildHeartbeatResponse(s.Conn().RemotePeer(), hb); resp != nil {
				s.SetWriteDeadline(time.Now().Add(3 * time.Second))
				json.NewEncoder(s).Encode(resp)
				s.SetWriteDeadline(time.Time{})
			}
		}
	}
}

func CheckHeartbeat(h host.Host, s network.Stream, dec *json.Decoder, streams map[pp.ID]HeartBeatStreamed, lock *sync.RWMutex, maxNodes int) (*pp.ID, *Heartbeat, error) {
	if len(h.Network().Peers()) >= maxNodes {
		return nil, nil, fmt.Errorf("too many connections, try another indexer")
	}
	var hb Heartbeat
	if err := dec.Decode(&hb); err != nil {
		return nil, nil, err
	}
	_, bpms, latencyScore, _ := getBandwidthChallengeRate(h, s.Conn().RemotePeer(), MinPayloadChallenge+int(rand.Float64()*(MaxPayloadChallenge-MinPayloadChallenge)))
	{
		pid, err := pp.Decode(hb.PeerID)
		if err != nil {
			return nil, nil, err
		}
		uptimeRatio := float64(0)
		age := time.Duration(0)
		lock.Lock()
		if rec, ok := streams[pid]; ok && rec.GetUptimeTracker() != nil {
			uptimeRatio = rec.GetUptimeTracker().UptimeRatio()
			age = rec.GetUptimeTracker().Uptime()
		}
		lock.Unlock()
		// E: measure the indexer's own subnet diversity, not the node's view.
		diversity := getOwnDiversityRate(h)
		// fillRate: fraction of indexer capacity used — higher = more peers trust this indexer.
		fillRate := 0.0
		if maxNodes > 0 {
			fillRate = float64(len(h.Network().Peers())) / float64(maxNodes)
			if fillRate > 1 {
				fillRate = 1
			}
		}
		hb.ComputeIndexerScore(uptimeRatio, bpms, diversity, latencyScore, fillRate)
		// B: dynamic minScore — starts at 20% for brand-new peers, ramps to 80% at 24h.
		minScore := dynamicMinScore(age)
		if hb.Score < minScore {
return nil, nil, errors.New("not enough trusting value")
		}
		hb.Stream = &Stream{
			Name:   hb.Name,
			DID:    hb.DID,
			Stream: s,
			Expiry: time.Now().UTC().Add(2 * time.Minute),
		} // here is the long-lived bidirectional heartbeat.
		return &pid, &hb, err
	}
}

// ── ConnectionRateGuard ───────────────────────────────────────────────────────

// ConnectionRateGuard limits the number of NEW incoming connections accepted
// within a sliding time window. It protects public indexers against coordinated
// registration floods (Sybil bursts).

const defaultMaxConnPerWindow = 20
const defaultConnWindowSecs = 30

type ConnectionRateGuard struct {
	mu          sync.Mutex
	window      []time.Time
	maxInWindow int
	windowDur   time.Duration
}

func newConnectionRateGuard() *ConnectionRateGuard {
	cfg := conf.GetConfig()
	return &ConnectionRateGuard{
		maxInWindow: CfgOr(cfg.MaxConnPerWindow, defaultMaxConnPerWindow),
		windowDur:   time.Duration(CfgOr(cfg.ConnWindowSecs, defaultConnWindowSecs)) * time.Second,
	}
}

// Allow returns true if a new connection may be accepted.
// The internal window is pruned on each call so memory stays bounded.
func (g *ConnectionRateGuard) Allow() bool {
	g.mu.Lock()
	defer g.mu.Unlock()
	now := time.Now()
	cutoff := now.Add(-g.windowDur)
	i := 0
	for i < len(g.window) && g.window[i].Before(cutoff) {
		i++
	}
	g.window = g.window[i:]
	if len(g.window) >= g.maxInWindow {
		return false
	}
	g.window = append(g.window, now)
	return true
}
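
A sketch of how the guard would typically gate the AllowInbound hook defined above (the wiring shown is an assumption for illustration, not code from this change):

	svc.AllowInbound = func(remote pp.ID, isNew bool) error {
		// Known peers re-connecting pass through; only first-ever
		// connections consume budget from the sliding window.
		if isNew && !svc.ConnGuard.Allow() {
			return errors.New("connection burst limit reached, retry later")
		}
		return nil
	}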
func CfgOr(v, def int) int {
	if v > 0 {
		return v
	}
	return def
}
@@ -3,178 +3,150 @@ package common
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"oc-discovery/conf"
	"strings"
	"sync"
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	peer "cloud.o-forge.io/core/oc-lib/models/peer"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	pp "github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

type LongLivedStreamRecordedService[T interface{}] struct {
	*LongLivedPubSubService
	StreamRecords    map[protocol.ID]map[pp.ID]*StreamRecord[T]
	StreamMU         sync.RWMutex
	maxNodesConn     int
	isBidirectionnal bool
// InitialEventHops is the starting hop count for SWIM membership events.
// floor(log2(typical max-pool)) + 1 gives O(log n) propagation rounds.
const InitialEventHops = 4

const maxMemberEventQueue = 50

// MembershipEventQueue holds SWIM membership events to be piggybacked on
// outgoing heartbeats (infection-style dissemination). Bounded at
// maxMemberEventQueue entries; events are deduplicated by PeerID.
type MembershipEventQueue struct {
	mu     sync.Mutex
	events []MemberEvent
}

func NewStreamRecordedService[T interface{}](h host.Host, maxNodesConn int, isBidirectionnal bool) *LongLivedStreamRecordedService[T] {
	service := &LongLivedStreamRecordedService[T]{
		LongLivedPubSubService: NewLongLivedPubSubService(h),
		StreamRecords:          map[protocol.ID]map[pp.ID]*StreamRecord[T]{},
		maxNodesConn:           maxNodesConn,
		isBidirectionnal:       isBidirectionnal,
// memberEventPriority maps event types to an integer so higher-severity
// events override lower-severity ones for the same PeerID.
func memberEventPriority(t MemberEventType) int {
	switch t {
	case MemberDead:
		return 3
	case MemberSuspect:
		return 2
	case MemberAlive:
		return 1
	}
	go service.StartGC(30 * time.Second)
|
	go service.Snapshot(1 * time.Hour)
	return service
	return 0
}

func (ix *LongLivedStreamRecordedService[T]) StartGC(interval time.Duration) {
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for range t.C {
			ix.gc()
		}
	}()
}

func (ix *LongLivedStreamRecordedService[T]) gc() {
	ix.StreamMU.Lock()
	defer ix.StreamMU.Unlock()
	now := time.Now().UTC()
	if ix.StreamRecords[ProtocolHeartbeat] == nil {
		ix.StreamRecords[ProtocolHeartbeat] = map[pp.ID]*StreamRecord[T]{}
		return
	}
	streams := ix.StreamRecords[ProtocolHeartbeat]

	for pid, rec := range streams {
		if now.After(rec.HeartbeatStream.Expiry) || now.Sub(rec.LastSeen) > 2*rec.HeartbeatStream.Expiry.Sub(now) {
			for _, sstreams := range ix.StreamRecords {
				if sstreams[pid] != nil {
					delete(sstreams, pid)
				}
// Add inserts or updates a membership event.
// An incoming event replaces the existing entry for the same PeerID when:
//   - its Incarnation is higher, OR
//   - the Incarnation is equal but the event type is higher-severity.
func (q *MembershipEventQueue) Add(e MemberEvent) {
	q.mu.Lock()
	defer q.mu.Unlock()
	for i, ex := range q.events {
		if ex.PeerID == e.PeerID {
			if e.Incarnation > ex.Incarnation ||
				(e.Incarnation == ex.Incarnation && memberEventPriority(e.Type) > memberEventPriority(ex.Type)) {
				q.events[i] = e
			}
			ix.PubsubMu.Lock()
			if ix.LongLivedPubSubs[TopicPubSubNodeActivity] != nil {
				ad, err := pp.AddrInfoFromString("/ip4/" + conf.GetConfig().Hostname + "/tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + "/p2p/" + ix.Host.ID().String())
				if err == nil {
					if b, err := json.Marshal(TopicNodeActivityPub{
						Disposer:     *ad,
						Name:         rec.HeartbeatStream.Name,
						DID:          rec.HeartbeatStream.DID,
						PeerID:       pid.String(),
						NodeActivity: peer.OFFLINE,
					}); err == nil {
						ix.LongLivedPubSubs[TopicPubSubNodeActivity].Publish(context.Background(), b)
					}
				}
			}
			ix.PubsubMu.Unlock()
			return
		}
	}
	if len(q.events) >= maxMemberEventQueue {
		q.events = q.events[1:] // drop oldest
	}
	q.events = append(q.events, e)
}

func (ix *LongLivedStreamRecordedService[T]) Snapshot(interval time.Duration) {
	go func() {
		logger := oclib.GetLogger()
		t := time.NewTicker(interval)
		defer t.Stop()
		for range t.C {
			infos := ix.snapshot()
			for _, inf := range infos {
				logger.Info().Msg(" -> " + inf.DID)
// Drain returns up to max events ready for transmission.
// HopsLeft is decremented on each call; events that reach 0 are removed from
// the queue (they have already propagated enough rounds).
func (q *MembershipEventQueue) Drain(max int) []MemberEvent {
	q.mu.Lock()
	defer q.mu.Unlock()
	if len(q.events) == 0 {
		return nil
	}
	out := make([]MemberEvent, 0, max)
	kept := q.events[:0]
	for _, e := range q.events {
		if len(out) < max {
			e.HopsLeft--
			out = append(out, e)
			if e.HopsLeft > 0 {
				kept = append(kept, e)
			}
		}
	}()
}

// -------- Snapshot / Query --------
func (ix *LongLivedStreamRecordedService[T]) snapshot() []*StreamRecord[T] {
	ix.StreamMU.Lock()
	defer ix.StreamMU.Unlock()

	out := make([]*StreamRecord[T], 0, len(ix.StreamRecords))
	for _, streams := range ix.StreamRecords {
		for _, stream := range streams {
			out = append(out, stream)
			// HopsLeft reached 0: event has propagated enough, drop from queue.
		} else {
			kept = append(kept, e)
		}
	}
	q.events = kept
	return out
}

func (ix *LongLivedStreamRecordedService[T]) HandleNodeHeartbeat(s network.Stream) {
	defer s.Close()
	for {
		pid, hb, err := CheckHeartbeat(ix.Host, s, ix.maxNodesConn)
		if err != nil {
			continue
		}
		ix.StreamMU.Lock()
		if ix.StreamRecords[ProtocolHeartbeat] == nil {
			ix.StreamRecords[ProtocolHeartbeat] = map[pp.ID]*StreamRecord[T]{}
		}
		streams := ix.StreamRecords[ProtocolHeartbeat]
		// if record already seen update last seen
		if rec, ok := streams[*pid]; ok {
			rec.DID = hb.DID
			rec.Stream = s
			rec.HeartbeatStream = hb.Stream
			rec.LastSeen = time.Now().UTC()
		} else {
			streams[*pid] = &StreamRecord[T]{
				DID:             hb.DID,
				HeartbeatStream: hb.Stream,
				Stream:          s,
				LastSeen:        time.Now().UTC(),
			}
		}
		ix.StreamMU.Unlock()
	}
// NodeEventQueue is the global SWIM event queue for the node side.
// Events are added on suspect/dead detection and drained into outgoing heartbeats.
var NodeEventQueue = &MembershipEventQueue{}

const (
	ProtocolPublish = "/opencloud/record/publish/1.0"
	ProtocolGet     = "/opencloud/record/get/1.0"
	ProtocolDelete  = "/opencloud/record/delete/1.0"
	// ProtocolIndirectProbe is opened by a node toward a live indexer to ask it
	// to actively probe a suspected indexer on the node's behalf (SWIM indirect ping).
	// It is the only inter-indexer protocol — indexers do not maintain persistent
	// connections to each other; this stream is one-shot and short-lived.
	ProtocolIndirectProbe = "/opencloud/indexer/probe/1.0"
)

// IndirectProbeRequest is sent by a node over ProtocolIndirectProbe.
// The receiving indexer must attempt to reach Target and report back.
type IndirectProbeRequest struct {
	Target pp.AddrInfo `json:"target"`
}

func CheckHeartbeat(h host.Host, s network.Stream, maxNodes int) (*pp.ID, *Heartbeat, error) {
	if len(h.Network().Peers()) >= maxNodes {
		return nil, nil, fmt.Errorf("too many connections, try another indexer")
	}
	var hb Heartbeat
	if err := json.NewDecoder(s).Decode(&hb); err != nil {
		return nil, nil, err
	}
	pid, err := pp.Decode(hb.PeerID)
	hb.Stream = &Stream{
		Name:   hb.Name,
		DID:    hb.DID,
		Stream: s,
		Expiry: time.Now().UTC().Add(2 * time.Minute),
	} // here is the long-lived bidirectional heartbeat.
	return &pid, &hb, err
// IndirectProbeResponse is the reply from the probing indexer.
type IndirectProbeResponse struct {
	Reachable bool  `json:"reachable"`
	LatencyMs int64 `json:"latency_ms,omitempty"`
}
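
The indexer-side responder for ProtocolIndirectProbe is not part of this hunk. A minimal sketch of what it could look like, reusing the PeerIsAlive helper that checkPeers calls in the scoring file above:

	h.SetStreamHandler(ProtocolIndirectProbe, func(s network.Stream) {
		defer s.Close()
		var req IndirectProbeRequest
		if json.NewDecoder(s).Decode(&req) != nil {
			return
		}
		start := time.Now()
		ok := PeerIsAlive(h, req.Target) // same liveness check used elsewhere in this package
		json.NewEncoder(s).Encode(IndirectProbeResponse{
			Reachable: ok,
			LatencyMs: time.Since(start).Milliseconds(),
		})
	})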

type StreamRecord[T interface{}] struct {
	DID             string
	HeartbeatStream *Stream
	Stream          network.Stream
	Record          T
	LastSeen        time.Time // to check expiry
}
const ProtocolHeartbeat = "/opencloud/heartbeat/1.0"

// ProtocolWitnessQuery is opened by a node to ask a peer what it thinks of a given indexer.
const ProtocolWitnessQuery = "/opencloud/witness/1.0"

// ProtocolSearchPeer is opened by a node toward one of its indexers to start a
// distributed peer search. The stream stays open; the indexer writes
// SearchPeerResult JSON objects as results arrive from the GossipSub mesh.
const ProtocolSearchPeer = "/opencloud/search/peer/1.0"

// ProtocolSearchPeerResponse is opened by an indexer back toward the emitting
// indexer to deliver search results found in its referencedNodes.
const ProtocolSearchPeerResponse = "/opencloud/search/peer/response/1.0"

// ProtocolBandwidthProbe is a dedicated short-lived stream used exclusively
// for bandwidth/latency measurement. The handler echoes any bytes it receives.
// All nodes and indexers register this handler so peers can measure them.
const ProtocolBandwidthProbe = "/opencloud/probe/1.0"

type Stream struct {
	Name   string `json:"name"`
	DID    string `json:"did"`
	Stream network.Stream
	Expiry time.Time `json:"expiry"`
	Name          string `json:"name"`
	DID           string `json:"did"`
	Stream        network.Stream
	Expiry        time.Time `json:"expiry"`
	UptimeTracker *UptimeTracker
}

func (s *Stream) GetUptimeTracker() *UptimeTracker {
	return s.UptimeTracker
}

func NewStream[T interface{}](s network.Stream, did string, record T) *Stream {
@@ -185,93 +157,126 @@ func NewStream[T interface{}](s network.Stream, did string, record T) *Stream {
	}
}

type ProtocolStream map[protocol.ID]map[pp.ID]*Stream

func (ps ProtocolStream) Get(protocol protocol.ID) map[pp.ID]*Stream {
	if ps[protocol] == nil {
		ps[protocol] = map[pp.ID]*Stream{}
	}

	return ps[protocol]
type StreamRecord[T interface{}] struct {
	DID             string
	HeartbeatStream *Stream
	Record          T
	LastScore       float64
}

func (ps ProtocolStream) Add(protocol protocol.ID, peerID *pp.ID, s *Stream) error {
	if ps[protocol] == nil {
		ps[protocol] = map[pp.ID]*Stream{}
func (s *StreamRecord[T]) GetUptimeTracker() *UptimeTracker {
	if s.HeartbeatStream == nil {
		return nil
	}
	if peerID != nil {
		if s != nil {
			ps[protocol][*peerID] = s
		} else {
			return errors.New("unable to add stream: stream missing")
	return s.HeartbeatStream.UptimeTracker
}

type ProtocolInfo struct {
	PersistantStream bool
	WaitResponse     bool
	TTL              time.Duration
}

func TempStream(h host.Host, ad pp.AddrInfo, proto protocol.ID, did string, streams ProtocolStream, pts map[protocol.ID]*ProtocolInfo, mu *sync.RWMutex) (ProtocolStream, error) {
	expiry := 2 * time.Second
	if pts[proto] != nil {
		expiry = pts[proto].TTL
	}
	ctxTTL, cancelTTL := context.WithTimeout(context.Background(), expiry)
	defer cancelTTL()

	if h.Network().Connectedness(ad.ID) != network.Connected {
		if err := h.Connect(ctxTTL, ad); err != nil {
			fmt.Println("Connectedness", ad.ID, err)

			return streams, err
		}
	}
	return nil
}

func (ps ProtocolStream) Delete(protocol protocol.ID, peerID *pp.ID) {
	if streams, ok := ps[protocol]; ok {
		if peerID != nil && streams[*peerID] != nil {
			streams[*peerID].Stream.Close()
			delete(streams, *peerID)
		} else {
			for _, s := range ps {
				for _, v := range s {
					v.Stream.Close()
				}
	fmt.Println("PROTO", streams[proto])
	if streams[proto] != nil && streams[proto][ad.ID] != nil {
		return streams, nil
	} else if s, err := h.NewStream(ctxTTL, ad.ID, proto); err == nil {
		mu.Lock()
		if streams[proto] == nil {
			streams[proto] = map[pp.ID]*Stream{}
		}
		mu.Unlock()
		time.AfterFunc(expiry, func() {
			mu.Lock()
			if streams[proto] != nil && streams[proto][ad.ID] != nil && streams[proto][ad.ID].Stream != nil {
				streams[proto][ad.ID].Stream.Close()
			}
			delete(ps, protocol)
			delete(streams[proto], ad.ID)
			mu.Unlock()
		})
		mu.Lock()
		streams[proto][ad.ID] = &Stream{
			DID:    did,
			Stream: s,
			Expiry: time.Now().UTC().Add(expiry),
		}
		mu.Unlock()
		return streams, nil
	} else {
		fmt.Println("ERROR", err)
		return streams, err
	}
}

const (
	ProtocolPublish = "/opencloud/record/publish/1.0"
	ProtocolGet     = "/opencloud/record/get/1.0"
)

var StaticIndexers []*pp.AddrInfo = []*pp.AddrInfo{}
var StreamIndexers ProtocolStream = ProtocolStream{}

func ConnectToIndexers(h host.Host, minIndexer int, maxIndexer int, myPID pp.ID) {
func sendHeartbeat(ctx context.Context, h host.Host, proto protocol.ID, p *pp.AddrInfo,
	hb Heartbeat, ps ProtocolStream, interval time.Duration) (*HeartbeatResponse, time.Duration, error) {
	logger := oclib.GetLogger()
	ctx := context.Background()
	addresses := strings.Split(conf.GetConfig().IndexerAddresses, ",")

	if len(addresses) > maxIndexer {
		addresses = addresses[0:maxIndexer]
	if ps[proto] == nil {
		ps[proto] = map[pp.ID]*Stream{}
	}

	for _, indexerAddr := range addresses {
		ad, err := pp.AddrInfoFromString(indexerAddr)
		if err != nil {
	streams := ps[proto]
	pss, exists := streams[p.ID]
	ctxTTL, cancel := context.WithTimeout(ctx, 3*interval)
	defer cancel()
	if h.Network().Connectedness(p.ID) != network.Connected {
		if err := h.Connect(ctxTTL, *p); err != nil {
			logger.Err(err)
			continue
		}
		if h.Network().Connectedness(ad.ID) != network.Connected {
			if err := h.Connect(ctx, *ad); err != nil {
				fmt.Println(err)
			logger.Err(err)
				continue
			}
		}
		StaticIndexers = append(StaticIndexers, ad)
		// open privileged streams with the indexer.
		for _, proto := range []protocol.ID{ProtocolPublish, ProtocolGet, ProtocolHeartbeat} {
			AddStreamProtocol(nil, StreamIndexers, h, proto, ad.ID, myPID, true, nil)
			return nil, 0, err
		}
		exists = false
	}
	if len(StaticIndexers) == 0 {
		logger.Err(errors.New("you're running a node without indexers... you're going to be isolated."))
	if !exists || pss.Stream == nil {
		logger.Info().Msg("new heartbeat stream opened " + fmt.Sprintf("%v", proto) + " " + p.ID.String())
		s, err := h.NewStream(ctx, p.ID, proto)
		if err != nil {
			logger.Err(err).Msg(err.Error())
			return nil, 0, err
		}
		pss = &Stream{
			Stream: s,
			Expiry: time.Now().UTC().Add(2 * time.Minute),
		}
		streams[p.ID] = pss
	}

	if len(StaticIndexers) < minIndexer {
		// TODO : ask for unknown indexer.
	sentAt := time.Now()
	if err := json.NewEncoder(pss.Stream).Encode(&hb); err != nil {
		pss.Stream.Close()
		pss.Stream = nil
		return nil, 0, err
	}
	SendHeartbeat(ctx, ProtocolHeartbeat, conf.GetConfig().Name, h, StreamIndexers, StaticIndexers, 20*time.Second) // your indexer is just like a node for the next indexer.
	pss.Expiry = time.Now().UTC().Add(2 * time.Minute)

	// Try to read a response (indexers that support bidirectional heartbeat respond).
	pss.Stream.SetReadDeadline(time.Now().Add(5 * time.Second))
	var resp HeartbeatResponse
	rtt := time.Since(sentAt)
	if err := json.NewDecoder(pss.Stream).Decode(&resp); err == nil {
		rtt = time.Since(sentAt)
		pss.Stream.SetReadDeadline(time.Time{})
		return &resp, rtt, nil
	}
	pss.Stream.SetReadDeadline(time.Time{})
	return nil, rtt, nil
}

func AddStreamProtocol(ctx *context.Context, protoS ProtocolStream, h host.Host, proto protocol.ID, id pp.ID, mypid pp.ID, force bool, onStreamCreated *func(network.Stream)) ProtocolStream {
	logger := oclib.GetLogger()
	if onStreamCreated == nil {
		f := func(s network.Stream) {
			protoS[proto][id] = &Stream{
@@ -294,7 +299,7 @@ func AddStreamProtocol(ctx *context.Context, protoS ProtocolStream, h host.Host,
	if protoS[proto][id] != nil {
		protoS[proto][id].Expiry = time.Now().Add(2 * time.Minute)
	} else {
		fmt.Println("GENERATE STREAM", proto, id)
		logger.Info().Msg("NEW STREAM generated " + fmt.Sprintf("%v", proto) + " " + id.String())
		s, err := h.NewStream(*ctx, id, proto)
		if err != nil {
			panic(err.Error())
@@ -304,99 +309,3 @@ func AddStreamProtocol(ctx *context.Context, protoS ProtocolStream, h host.Host,
	}
	return protoS
}

type Heartbeat struct {
	Name      string  `json:"name"`
	Stream    *Stream `json:"stream"`
	DID       string  `json:"did"`
	PeerID    string  `json:"peer_id"`
	Timestamp int64   `json:"timestamp"`
}

type HeartbeatInfo []struct {
	Info []byte `json:"info"`
}

const ProtocolHeartbeat = "/opencloud/heartbeat/1.0"

func SendHeartbeat(ctx context.Context, proto protocol.ID, name string, h host.Host, ps ProtocolStream, peers []*pp.AddrInfo, interval time.Duration) {
	peerID, err := oclib.GenerateNodeID()
	if err != nil {
		panic("can't heartbeat: daemon failed to start")
	}
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				hb := Heartbeat{
					Name:      name,
					DID:       peerID,
					PeerID:    h.ID().String(),
					Timestamp: time.Now().UTC().Unix(),
				}
				for _, ix := range peers {
					_ = sendHeartbeat(ctx, h, proto, ix, hb, ps, interval*time.Second)
				}
			case <-ctx.Done():
				return
			}
		}
	}()
}

func sendHeartbeat(ctx context.Context, h host.Host, proto protocol.ID, p *pp.AddrInfo,
	hb Heartbeat, ps ProtocolStream, interval time.Duration) error {
	streams := ps.Get(proto)
	if len(streams) == 0 {
		return errors.New("no stream for protocol heartbeat found")
	}
	pss, exists := streams[p.ID]

	ctxTTL, _ := context.WithTimeout(ctx, 3*interval)
	// Connect if necessary
	if h.Network().Connectedness(p.ID) != network.Connected {
		_ = h.Connect(ctxTTL, *p)
		exists = false // the stream will have to be recreated
	}

	// Create the stream if missing or closed
	if !exists || pss.Stream == nil {
		s, err := h.NewStream(ctx, p.ID, proto)
		if err != nil {
			return err
		}
		pss = &Stream{
			Stream: s,
			Expiry: time.Now().UTC().Add(2 * time.Minute),
		}
		streams[p.ID] = pss
	}

	// Send the heartbeat
	ss := json.NewEncoder(pss.Stream)
	err := ss.Encode(&hb)
	if err != nil {
		pss.Stream.Close()
		pss.Stream = nil // will be recreated on the next tick
		return err
	}
	pss.Expiry = time.Now().UTC().Add(2 * time.Minute)
	return nil
}
||||
func SearchPeer(search string) ([]*peer.Peer, error) {
|
||||
ps := []*peer.Peer{}
|
||||
access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
|
||||
peers := access.Search(nil, search, false)
|
||||
if len(peers.Data) == 0 {
|
||||
return ps, errors.New("no self available")
|
||||
}
|
||||
for _, p := range peers.Data {
|
||||
ps = append(ps, p.(*peer.Peer))
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
*/
|
||||
|
||||
199
daemons/node/common/consensus.go
Normal file
199
daemons/node/common/consensus.go
Normal file
@@ -0,0 +1,199 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"github.com/libp2p/go-libp2p/core/host"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
pp "github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
// ProtocolIndexerCandidates is opened by a node toward its remaining indexers
|
||||
// to request candidate replacement indexers after an ejection event.
|
||||
const ProtocolIndexerCandidates = "/opencloud/indexer/candidates/1.0"
|
||||
|
||||
// IndexerCandidatesRequest is sent by a node to one of its indexers.
|
||||
// Count is how many candidates are needed.
|
||||
type IndexerCandidatesRequest struct {
|
||||
Count int `json:"count"`
|
||||
}
|
||||
|
||||
// IndexerCandidatesResponse carries a random sample of known indexers from
|
||||
// the responding indexer's DHT cache.
|
||||
type IndexerCandidatesResponse struct {
|
||||
Candidates []pp.AddrInfo `json:"candidates"`
|
||||
}
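
The responding side of ProtocolIndexerCandidates is not shown in this file. A plausible sketch, with knownIndexers() standing in for however the indexer samples its DHT cache (both the handler wiring and that accessor are assumptions):

	h.SetStreamHandler(ProtocolIndexerCandidates, func(s network.Stream) {
		defer s.Close()
		var req IndexerCandidatesRequest
		if json.NewDecoder(s).Decode(&req) != nil {
			return
		}
		pool := knownIndexers() // hypothetical accessor over the DHT cache
		rand.Shuffle(len(pool), func(i, j int) { pool[i], pool[j] = pool[j], pool[i] })
		if len(pool) > req.Count {
			pool = pool[:req.Count]
		}
		json.NewEncoder(s).Encode(IndexerCandidatesResponse{Candidates: pool})
	})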
|
||||
|
||||
// TriggerConsensus asks each remaining indexer for a random pool of candidates,
|
||||
// scores them asynchronously via a one-shot probe heartbeat, and admits the
|
||||
// best ones to StaticIndexers. Falls back to DHT replenishment for any gap.
|
||||
//
|
||||
// Must be called in a goroutine — it blocks until all probes have returned
|
||||
// (or timed out), which can take up to ~10s.
|
||||
func TriggerConsensus(h host.Host, remaining []pp.AddrInfo, need int) {
|
||||
if need <= 0 || len(remaining) == 0 {
|
||||
return
|
||||
}
|
||||
logger := oclib.GetLogger()
|
||||
logger.Info().Int("voters", len(remaining)).Int("need", need).
|
||||
Msg("[consensus] starting indexer candidate consensus")
|
||||
|
||||
// Phase 1 — collect candidates from all remaining indexers in parallel.
|
||||
type collectResult struct{ candidates []pp.AddrInfo }
|
||||
collectCh := make(chan collectResult, len(remaining))
|
||||
for _, ai := range remaining {
|
||||
go func(ai pp.AddrInfo) {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
s, err := h.NewStream(ctx, ai.ID, ProtocolIndexerCandidates)
|
||||
if err != nil {
|
||||
collectCh <- collectResult{}
|
				return
			}
			defer s.Close()
			s.SetDeadline(time.Now().Add(5 * time.Second))
			if err := json.NewEncoder(s).Encode(IndexerCandidatesRequest{Count: need + 2}); err != nil {
				collectCh <- collectResult{}
				return
			}
			var resp IndexerCandidatesResponse
			if err := json.NewDecoder(s).Decode(&resp); err != nil {
				collectCh <- collectResult{}
				return
			}
			collectCh <- collectResult{candidates: resp.Candidates}
		}(ai)
	}

	// Merge and deduplicate, excluding indexers already in the pool.
	seen := map[pp.ID]struct{}{}
	for _, ai := range Indexers.GetAddrIDs() {
		seen[ai] = struct{}{}
	}
	var candidates []pp.AddrInfo
	for range remaining {
		r := <-collectCh
		for _, ai := range r.candidates {
			if _, dup := seen[ai.ID]; !dup {
				seen[ai.ID] = struct{}{}
				candidates = append(candidates, ai)
			}
		}
	}

	if len(candidates) == 0 {
		logger.Info().Msg("[consensus] no candidates from voters, falling back to DHT")
		replenishIndexersFromDHT(h, need)
		return
	}
	logger.Info().Int("candidates", len(candidates)).Msg("[consensus] scoring candidates")

	// Phase 2 — score all candidates in parallel via a one-shot probe heartbeat.
	type scoreResult struct {
		ai    pp.AddrInfo
		score float64
	}
	scoreCh := make(chan scoreResult, len(candidates))
	for _, ai := range candidates {
		go func(ai pp.AddrInfo) {
			resp, rtt, err := probeIndexer(h, ai)
			if err != nil {
				scoreCh <- scoreResult{ai: ai, score: 0}
				return
			}
			scoreCh <- scoreResult{ai: ai, score: quickScore(resp, rtt)}
		}(ai)
	}

	results := make([]scoreResult, 0, len(candidates))
	for range candidates {
		results = append(results, <-scoreCh)
	}

	// Sort descending by quick score, admit top `need` above the minimum bar.
	sort.Slice(results, func(i, j int) bool { return results[i].score > results[j].score })
	minQ := dynamicMinScore(0) // fresh peer: threshold starts at 20

	admitted := 0
	for _, res := range results {
		if admitted >= need {
			break
		}
		if res.score < minQ {
			break // sorted desc: everything after is worse
		}
		key := addrKey(res.ai)
		if Indexers.ExistsAddr(key) {
			continue // already in pool (race with heartbeat path)
		}
		cpy := res.ai
		Indexers.SetAddr(key, &cpy)
		admitted++
	}

	if admitted > 0 {
		logger.Info().Int("admitted", admitted).Msg("[consensus] candidates admitted to pool")
		Indexers.NudgeIt()
	}

	// Fill any remaining gap with DHT discovery.
	if gap := need - admitted; gap > 0 {
		logger.Info().Int("gap", gap).Msg("[consensus] gap after consensus, falling back to DHT")
		replenishIndexersFromDHT(h, gap)
	}
}

// probeIndexer dials the candidate, sends one lightweight heartbeat, and
// returns the HeartbeatResponse (nil if the indexer doesn't support it) and RTT.
func probeIndexer(h host.Host, ai pp.AddrInfo) (*HeartbeatResponse, time.Duration, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 8*time.Second)
	defer cancel()
	if h.Network().Connectedness(ai.ID) != network.Connected {
		if err := h.Connect(ctx, ai); err != nil {
			return nil, 0, err
		}
	}
	s, err := h.NewStream(ctx, ai.ID, ProtocolHeartbeat)
	if err != nil {
		return nil, 0, err
	}
	defer s.Close()

	hb := Heartbeat{PeerID: h.ID().String(), Timestamp: time.Now().UTC().Unix()}
	s.SetWriteDeadline(time.Now().Add(3 * time.Second))
	if err := json.NewEncoder(s).Encode(hb); err != nil {
		return nil, 0, err
	}
	s.SetWriteDeadline(time.Time{})

	sentAt := time.Now()
	s.SetReadDeadline(time.Now().Add(5 * time.Second))
	var resp HeartbeatResponse
	if err := json.NewDecoder(s).Decode(&resp); err != nil {
		// Indexer connected but no response: connection itself is the signal.
		return nil, time.Since(sentAt), nil
	}
	return &resp, time.Since(sentAt), nil
}

// quickScore computes a lightweight score [0,100] from a probe result.
// Uses only fill rate (inverse) and latency — the two signals available
// without a full heartbeat history.
func quickScore(resp *HeartbeatResponse, rtt time.Duration) float64 {
	maxRTT := BaseRoundTrip * 10
	latencyScore := 1.0 - float64(rtt)/float64(maxRTT)
	if latencyScore < 0 {
		latencyScore = 0
	}
	if resp == nil {
		// Connection worked but no response (old indexer): moderate score.
		return latencyScore * 50
	}
	fillScore := 1.0 - resp.FillRate // prefer less-loaded indexers
	return (0.5*latencyScore + 0.5*fillScore) * 100
}
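As a quick sanity check of the formula above (a worked example; the value BaseRoundTrip = 100ms is an assumption for illustration, the real constant is defined elsewhere in this package):

	// Assumed: BaseRoundTrip == 100ms, so maxRTT == 1s.
	// latencyScore = 1 - 200ms/1000ms = 0.8
	// fillScore    = 1 - 0.3          = 0.7
	// score        = (0.5*0.8 + 0.5*0.7) * 100 = 75
	score := quickScore(&HeartbeatResponse{FillRate: 0.3}, 200*time.Millisecond)

A healthy candidate therefore clears the fresh-peer admission threshold of 20 comfortably, while a candidate that connects but sends no response caps out at latencyScore*50, i.e. 40 for the same RTT.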
@@ -2,12 +2,8 @@ package common

import (
	"bytes"
	"crypto/ed25519"
	"crypto/x509"
	"encoding/base64"
	"encoding/pem"
	"errors"
	"fmt"
	"oc-discovery/conf"
	"oc-discovery/models"
	"os"
@@ -47,45 +43,6 @@ func Verify(pub crypto.PubKey, data, sig []byte) (bool, error) {
	return pub.Verify(data, sig)
}

func LoadKeyFromFilePrivate() (crypto.PrivKey, error) {
	path := conf.GetConfig().PrivateKeyPath
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(data)
	keyAny, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		return nil, err
	}

	edKey, ok := keyAny.(ed25519.PrivateKey)
	if !ok {
		return nil, fmt.Errorf("not an ed25519 key")
	}
	return crypto.UnmarshalEd25519PrivateKey(edKey)
}

func LoadKeyFromFilePublic() (crypto.PubKey, error) {
	path := conf.GetConfig().PublicKeyPath
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	block, _ := pem.Decode(data)
	keyAny, err := x509.ParsePKIXPublicKey(block.Bytes)
	if err != nil {
		return nil, err
	}

	edKey, ok := keyAny.(ed25519.PublicKey)
	if !ok {
		return nil, fmt.Errorf("not an ed25519 key")
	}
	// Unmarshal as a libp2p ed25519 public key.
	return crypto.UnmarshalEd25519PublicKey(edKey)
}

func LoadPSKFromFile() (pnet.PSK, error) {
	path := conf.GetConfig().PSKPath
	data, err := os.ReadFile(path)
219
daemons/node/common/dht_discovery.go
Normal file
@@ -0,0 +1,219 @@
package common

import (
	"context"
	"math/rand"
	"strings"
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	"github.com/ipfs/go-cid"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p/core/host"
	pp "github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
	mh "github.com/multiformats/go-multihash"
)

// FilterLoopbackAddrs strips loopback (127.x, ::1) and unspecified addresses
// from an AddrInfo so we never hand peers an address they cannot dial externally.
func FilterLoopbackAddrs(ai pp.AddrInfo) pp.AddrInfo {
	filtered := make([]ma.Multiaddr, 0, len(ai.Addrs))
	for _, addr := range ai.Addrs {
		ip, err := ExtractIP(addr.String())
		if err != nil || ip.IsLoopback() || ip.IsUnspecified() {
			continue
		}
		filtered = append(filtered, addr)
	}
	return pp.AddrInfo{ID: ai.ID, Addrs: filtered}
}

// RecommendedHeartbeatInterval is the target period between heartbeat ticks.
// Indexers use this as the DHT Provide refresh interval.
const RecommendedHeartbeatInterval = 60 * time.Second

// discoveryDHT is the DHT instance used for indexer discovery.
// Set by SetDiscoveryDHT once the indexer service initialises its DHT.
var discoveryDHT *dht.IpfsDHT

// SetDiscoveryDHT stores the DHT instance used by replenishIndexersFromDHT.
// Called by NewIndexerService once the DHT is ready.
func SetDiscoveryDHT(d *dht.IpfsDHT) {
	discoveryDHT = d
}

// initNodeDHT creates a lightweight DHT client for pure nodes (no IndexerService).
// Uses the seed indexers as bootstrap peers. Called lazily by ConnectToIndexers
// when discoveryDHT is still nil after the initial warm-up delay.
func initNodeDHT(h host.Host, seeds []Entry) {
	logger := oclib.GetLogger()
	bootstrapPeers := []pp.AddrInfo{}
	for _, s := range seeds {
		bootstrapPeers = append(bootstrapPeers, *s.Info)
	}
	d, err := dht.New(context.Background(), h,
		dht.Mode(dht.ModeClient),
		dht.ProtocolPrefix("oc"),
		dht.BootstrapPeers(bootstrapPeers...),
	)
	if err != nil {
		logger.Warn().Err(err).Msg("[dht] node DHT client init failed")
		return
	}
	SetDiscoveryDHT(d)
	ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
	defer cancel()
	if err := d.Bootstrap(ctx); err != nil {
		logger.Warn().Err(err).Msg("[dht] node DHT client bootstrap failed")
	}
	logger.Info().Msg("[dht] node DHT client ready")
}

// IndexerCID returns the well-known CID under which all indexers advertise.
func IndexerCID() cid.Cid {
	h, _ := mh.Sum([]byte("/opencloud/indexers"), mh.SHA2_256, -1)
	return cid.NewCidV1(cid.Raw, h)
}

// DiscoverIndexersFromDHT uses the DHT to find up to count indexers advertising
// under the well-known key. Excludes self. Resolves addresses when the provider
// record carries none.
func DiscoverIndexersFromDHT(h host.Host, d *dht.IpfsDHT, count int) []pp.AddrInfo {
	logger := oclib.GetLogger()
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	c := IndexerCID()
	ch := d.FindProvidersAsync(ctx, c, count*2)
	seen := map[pp.ID]struct{}{}
	var results []pp.AddrInfo
	for ai := range ch {
		if ai.ID == h.ID() {
			continue
		}
		if _, dup := seen[ai.ID]; dup {
			continue
		}
		seen[ai.ID] = struct{}{}
		if len(ai.Addrs) == 0 {
			resolved, err := d.FindPeer(ctx, ai.ID)
			if err != nil {
				logger.Warn().Str("peer", ai.ID.String()).Msg("[dht] no addrs and FindPeer failed, skipping")
				continue
			}
			ai = resolved
		}
		ai = FilterLoopbackAddrs(ai)
		if len(ai.Addrs) == 0 {
			continue
		}
		results = append(results, ai)
		if len(results) >= count {
			break
		}
	}
	logger.Info().Int("found", len(results)).Msg("[dht] indexer discovery complete")
	return results
}

// SelectByFillRate picks up to want providers, ranked by the fill-rate weight
// w(F) = F*(1-F) with a random shuffle to break ties. The weight peaks at
// F=0.5, favouring moderately loaded indexers over idle or saturated ones.
// Providers with unknown fill rate receive F=0.5 (neutral prior).
// Enforces subnet /24 diversity: at most one indexer per /24.
func SelectByFillRate(providers []pp.AddrInfo, fillRates map[pp.ID]float64, want int) []pp.AddrInfo {
	if len(providers) == 0 || want <= 0 {
		return nil
	}
	type weighted struct {
		ai     pp.AddrInfo
		weight float64
	}
	ws := make([]weighted, 0, len(providers))
	for _, ai := range providers {
		f, ok := fillRates[ai.ID]
		if !ok {
			f = 0.5
		}
		ws = append(ws, weighted{ai: ai, weight: f * (1 - f)})
	}
	// Shuffle first for fairness among equal-weight peers.
	rand.Shuffle(len(ws), func(i, j int) { ws[i], ws[j] = ws[j], ws[i] })
	// Sort descending by weight (simple insertion sort — small N).
	for i := 1; i < len(ws); i++ {
		for j := i; j > 0 && ws[j].weight > ws[j-1].weight; j-- {
			ws[j], ws[j-1] = ws[j-1], ws[j]
		}
	}

	subnets := map[string]struct{}{}
	var selected []pp.AddrInfo
	for _, w := range ws {
		if len(selected) >= want {
			break
		}
		subnet := subnetOf(w.ai)
		if subnet != "" {
			if _, dup := subnets[subnet]; dup {
				continue
			}
			subnets[subnet] = struct{}{}
		}
		selected = append(selected, w.ai)
	}
	return selected
}
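A small illustration of the weight curve and the /24 rule (a sketch; a, b and c are hypothetical providers with distinct /24 subnets, not values from this package):

	providers := []pp.AddrInfo{a, b, c}
	rates := map[pp.ID]float64{
		a.ID: 0.5, // w = 0.5*0.5 = 0.25, top rank
		b.ID: 0.9, // w = 0.9*0.1 = 0.09, nearly saturated
		c.ID: 0.1, // w = 0.1*0.9 = 0.09, nearly idle, ties with b
	}
	picked := SelectByFillRate(providers, rates, 2) // a first, then b or c at random

Note that b and c tie: w(F) is symmetric around 0.5, so the initial shuffle decides between an almost-idle and an almost-full indexer.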
// subnetOf returns the /24 subnet string for the first non-loopback address of ai.
func subnetOf(ai pp.AddrInfo) string {
	for _, ma := range ai.Addrs {
		ip, err := ExtractIP(ma.String())
		if err != nil || ip.IsLoopback() {
			continue
		}
		parts := strings.Split(ip.String(), ".")
		if len(parts) >= 3 {
			return parts[0] + "." + parts[1] + "." + parts[2]
		}
	}
	return ""
}

// replenishIndexersFromDHT is called when an indexer heartbeat fails and more
// indexers are needed. Queries the DHT and adds fresh entries to StaticIndexers.
func replenishIndexersFromDHT(h host.Host, need int) {
	if need <= 0 || discoveryDHT == nil {
		return
	}
	logger := oclib.GetLogger()
	logger.Info().Int("need", need).Msg("[dht] replenishing indexer pool from DHT")

	providers := DiscoverIndexersFromDHT(h, discoveryDHT, need*3)
	selected := SelectByFillRate(providers, nil, need)
	if len(selected) == 0 {
		logger.Warn().Msg("[dht] no indexers found in DHT for replenishment")
		return
	}

	added := 0
	for _, ai := range selected {
		addr := addrKey(ai)
		if !Indexers.ExistsAddr(addr) {
			adCopy := ai
			Indexers.SetAddr(addr, &adCopy)
			added++
		}
	}

	if added > 0 {
		logger.Info().Int("added", added).Msg("[dht] indexers added from DHT")
		Indexers.NudgeIt()
	}
}

// addrKey returns the canonical map key for an AddrInfo.
// The PeerID is used as key so the same peer is never stored twice regardless
// of which of its addresses was seen first.
func addrKey(ai pp.AddrInfo) string {
	return ai.ID.String()
}
@@ -4,8 +4,14 @@ import (
	"context"

	"cloud.o-forge.io/core/oc-lib/models/peer"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

type HeartBeatStreamed interface {
	GetUptimeTracker() *UptimeTracker
}

type DiscoveryPeer interface {
	GetPeerRecord(ctx context.Context, key string) ([]*peer.Peer, error)
	GetPubSub(topicName string) *pubsub.Topic
}

94
daemons/node/common/search_tracker.go
Normal file
@@ -0,0 +1,94 @@
package common

import (
	"context"
	"oc-discovery/conf"
	"strings"
	"sync"
	"time"

	"github.com/google/uuid"
)

// SearchIdleTimeout returns the configured search idle timeout (default 5s).
func SearchIdleTimeout() time.Duration {
	if t := conf.GetConfig().SearchTimeout; t > 0 {
		return time.Duration(t) * time.Second
	}
	return 5 * time.Second
}

// searchEntry holds the lifecycle state for one active search.
type searchEntry struct {
	cancel      context.CancelFunc
	timer       *time.Timer
	idleTimeout time.Duration
}

// SearchTracker tracks one active search per user (peer or resource).
// Each search is keyed by a composite "user:searchID" so that a replaced
// search's late-arriving results can be told apart from the current one.
//
// Typical usage:
//
//	ctx, cancel := context.WithCancel(parent)
//	key := tracker.Register(userKey, cancel, idleTimeout)
//	defer tracker.Cancel(key)
//	// ... on each result: tracker.ResetIdle(key) + tracker.IsActive(key)
type SearchTracker struct {
	mu      sync.Mutex
	entries map[string]*searchEntry
}

func NewSearchTracker() *SearchTracker {
	return &SearchTracker{entries: map[string]*searchEntry{}}
}

// Register starts a new search for baseUser, cancelling any previous one.
// Returns the composite key "baseUser:searchID" to be used as the search identifier.
func (t *SearchTracker) Register(baseUser string, cancel context.CancelFunc, idleTimeout time.Duration) string {
	compositeKey := baseUser + ":" + uuid.New().String()
	t.mu.Lock()
	t.cancelByPrefix(baseUser)
	e := &searchEntry{cancel: cancel, idleTimeout: idleTimeout}
	e.timer = time.AfterFunc(idleTimeout, func() { t.Cancel(compositeKey) })
	t.entries[compositeKey] = e
	t.mu.Unlock()
	return compositeKey
}

// Cancel cancels the search(es) matching user (bare user key or composite key).
func (t *SearchTracker) Cancel(user string) {
	t.mu.Lock()
	t.cancelByPrefix(user)
	t.mu.Unlock()
}

// ResetIdle resets the idle timer for compositeKey after a response arrives.
func (t *SearchTracker) ResetIdle(compositeKey string) {
	t.mu.Lock()
	if e, ok := t.entries[compositeKey]; ok {
		e.timer.Reset(e.idleTimeout)
	}
	t.mu.Unlock()
}

// IsActive returns true if compositeKey is still the current active search.
func (t *SearchTracker) IsActive(compositeKey string) bool {
	t.mu.Lock()
	_, ok := t.entries[compositeKey]
	t.mu.Unlock()
	return ok
}

// cancelByPrefix cancels all entries whose key equals user or starts with "user:".
// Must be called with t.mu held.
func (t *SearchTracker) cancelByPrefix(user string) {
	for k, e := range t.entries {
		if k == user || strings.HasPrefix(k, user+":") {
			e.timer.Stop()
			e.cancel()
			delete(t.entries, k)
		}
	}
}
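To make the supersede semantics concrete, a minimal usage sketch (the result channel, user key and handler are illustrative, not part of this file):

	tracker := NewSearchTracker()
	ctx, cancel := context.WithCancel(context.Background())
	key := tracker.Register("user-42", cancel, SearchIdleTimeout())
	defer tracker.Cancel(key)

	for result := range resultsCh { // resultsCh: hypothetical stream of hits
		if !tracker.IsActive(key) {
			break // a newer Register for "user-42" cancelled this search
		}
		tracker.ResetIdle(key) // keep the idle timer alive while results flow
		handle(ctx, result)    // handle: hypothetical consumer honouring ctx
	}

A second Register for the same base user cancels ctx and removes the old composite key, so late results carrying the old key fail the IsActive check and are dropped.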
68
daemons/node/common/utils.go
Normal file
@@ -0,0 +1,68 @@
package common

import (
	"context"
	"fmt"
	"math/rand"
	"net"
	"time"

	"github.com/libp2p/go-libp2p/core/host"
	pp "github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multiaddr"
)

func PeerIsAlive(h host.Host, ad pp.AddrInfo) bool {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	err := h.Connect(ctx, ad)
	return err == nil
}

func ExtractIP(addr string) (net.IP, error) {
	ma, err := multiaddr.NewMultiaddr(addr)
	if err != nil {
		return nil, err
	}
	ipStr, err := ma.ValueForProtocol(multiaddr.P_IP4)
	if err != nil {
		ipStr, err = ma.ValueForProtocol(multiaddr.P_IP6)
		if err != nil {
			return nil, err
		}
	}
	ip := net.ParseIP(ipStr)
	if ip == nil {
		return nil, fmt.Errorf("invalid IP: %s", ipStr)
	}
	return ip, nil
}

func GetIndexer(addrOrId string) *pp.AddrInfo {
	return Indexers.GetAddr(addrOrId)
}

func GetIndexersIDs() []pp.ID {
	return Indexers.GetAddrIDs()
}

func GetIndexersStr() []string {
	return Indexers.GetAddrsStr()
}

func GetIndexers() []*pp.AddrInfo {
	entries := Indexers.GetAddrs()
	result := make([]*pp.AddrInfo, 0, len(entries))
	for _, e := range entries {
		result = append(result, e.Info)
	}
	return result
}

func Shuffle[T any](slice []T) []T {
	rand.Shuffle(len(slice), func(i, j int) {
		slice[i], slice[j] = slice[j], slice[i]
	})
	return slice
}
31
daemons/node/connection_gater.go
Normal file
@@ -0,0 +1,31 @@
package node

import (
	"github.com/libp2p/go-libp2p/core/control"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/network"
	pp "github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

// OCConnectionGater allows all connections unconditionally.
// Peer validation (local DB + DHT by peer_id) is enforced at the stream level
// in each handler, so ProtocolHeartbeat and ProtocolPublish — through which a
// node first registers itself — are never blocked.
type OCConnectionGater struct {
	host host.Host
}

func newOCConnectionGater(h host.Host) *OCConnectionGater {
	return &OCConnectionGater{host: h}
}

func (g *OCConnectionGater) InterceptPeerDial(_ pp.ID) bool                 { return true }
func (g *OCConnectionGater) InterceptAddrDial(_ pp.ID, _ ma.Multiaddr) bool { return true }
func (g *OCConnectionGater) InterceptAccept(_ network.ConnMultiaddrs) bool  { return true }
func (g *OCConnectionGater) InterceptSecured(_ network.Direction, _ pp.ID, _ network.ConnMultiaddrs) bool {
	return true
}
func (g *OCConnectionGater) InterceptUpgraded(_ network.Conn) (bool, control.DisconnectReason) {
	return true, 0
}
207
daemons/node/indexer/behavior.go
Normal file
@@ -0,0 +1,207 @@
package indexer

import (
	"errors"
	"sync"
	"time"

	"oc-discovery/conf"
	"oc-discovery/daemons/node/common"

	pp "github.com/libp2p/go-libp2p/core/peer"
)

// ── defaults ──────────────────────────────────────────────────────────────────

const (
	defaultMaxHBPerMinute   = 5
	defaultMaxPublishPerMin = 10
	defaultMaxGetPerMin     = 50
	strikeThreshold         = 3
	banDuration             = 10 * time.Minute
	behaviorWindowDur       = 60 * time.Second
)

// ── per-node state ────────────────────────────────────────────────────────────

type nodeBehavior struct {
	mu          sync.Mutex
	knownDID    string
	hbTimes     []time.Time
	pubTimes    []time.Time
	getTimes    []time.Time
	strikes     int
	bannedUntil time.Time
}

func (nb *nodeBehavior) isBanned() bool {
	return time.Now().UTC().Before(nb.bannedUntil)
}

func (nb *nodeBehavior) strike(n int) {
	nb.strikes += n
	if nb.strikes >= strikeThreshold {
		nb.bannedUntil = time.Now().Add(banDuration)
	}
}

func pruneWindow(ts []time.Time, dur time.Duration) []time.Time {
	cutoff := time.Now().Add(-dur)
	i := 0
	for i < len(ts) && ts[i].Before(cutoff) {
		i++
	}
	return ts[i:]
}

// recordInWindow appends now to the window slice and returns false (+ adds a
// strike) when the count exceeds max.
func (nb *nodeBehavior) recordInWindow(ts *[]time.Time, max int) bool {
	*ts = pruneWindow(*ts, behaviorWindowDur)
	if len(*ts) >= max {
		nb.strike(1)
		return false
	}
	*ts = append(*ts, time.Now())
	return true
}
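The limiter above is a plain sliding window: each call first prunes timestamps older than 60 seconds, then either records the call or strikes. A throwaway illustration with the default heartbeat budget of 5 per minute:

	nb := &nodeBehavior{}
	for i := 0; i < 7; i++ {
		ok := nb.recordInWindow(&nb.hbTimes, defaultMaxHBPerMinute)
		_ = ok // calls 1..5 return true; calls 6 and 7 return false, one strike each
	}
	// nb.strikes == 2 here; one more rejected call reaches strikeThreshold (3),
	// which sets bannedUntil and makes isBanned() return true for banDuration (10 min).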
// ── NodeBehaviorTracker ───────────────────────────────────────────────────────

// NodeBehaviorTracker is the indexer-side per-node compliance monitor.
// It is entirely local: no state is shared with other indexers.
type NodeBehaviorTracker struct {
	mu    sync.RWMutex
	nodes map[pp.ID]*nodeBehavior

	maxHB  int
	maxPub int
	maxGet int
}

func newNodeBehaviorTracker() *NodeBehaviorTracker {
	cfg := conf.GetConfig()
	return &NodeBehaviorTracker{
		nodes:  make(map[pp.ID]*nodeBehavior),
		maxHB:  common.CfgOr(cfg.MaxHBPerMinute, defaultMaxHBPerMinute),
		maxPub: common.CfgOr(cfg.MaxPublishPerMinute, defaultMaxPublishPerMin),
		maxGet: common.CfgOr(cfg.MaxGetPerMinute, defaultMaxGetPerMin),
	}
}

func (t *NodeBehaviorTracker) get(pid pp.ID) *nodeBehavior {
	t.mu.RLock()
	nb := t.nodes[pid]
	t.mu.RUnlock()
	if nb != nil {
		return nb
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	if nb = t.nodes[pid]; nb == nil {
		nb = &nodeBehavior{}
		t.nodes[pid] = nb
	}
	return nb
}

// IsBanned returns true when the peer is in an active ban period.
func (t *NodeBehaviorTracker) IsBanned(pid pp.ID) bool {
	nb := t.get(pid)
	nb.mu.Lock()
	defer nb.mu.Unlock()
	return nb.isBanned()
}

// RecordHeartbeat checks heartbeat cadence. Returns an error if the peer is
// flooding (too many heartbeats in the sliding window).
func (t *NodeBehaviorTracker) RecordHeartbeat(pid pp.ID) error {
	nb := t.get(pid)
	nb.mu.Lock()
	defer nb.mu.Unlock()
	if nb.isBanned() {
		return errors.New("peer is banned")
	}
	if !nb.recordInWindow(&nb.hbTimes, t.maxHB) {
		return errors.New("heartbeat flood detected")
	}
	return nil
}

// CheckIdentity verifies that the DID associated with a PeerID never changes.
// A DID change is a strong signal of identity spoofing.
func (t *NodeBehaviorTracker) CheckIdentity(pid pp.ID, did string) error {
	if did == "" {
		return nil
	}
	nb := t.get(pid)
	nb.mu.Lock()
	defer nb.mu.Unlock()
	if nb.knownDID == "" {
		nb.knownDID = did
		return nil
	}
	if nb.knownDID != did {
		nb.strike(2) // identity change is severe
		return errors.New("DID mismatch for peer " + pid.String())
	}
	return nil
}

// RecordBadSignature registers a cryptographic verification failure.
// A single bad signature is worth 2 strikes (near-immediate ban).
func (t *NodeBehaviorTracker) RecordBadSignature(pid pp.ID) {
	nb := t.get(pid)
	nb.mu.Lock()
	defer nb.mu.Unlock()
	nb.strike(2)
}

// RecordPublish checks publish volume. Returns an error if the peer is
// sending too many publish requests.
func (t *NodeBehaviorTracker) RecordPublish(pid pp.ID) error {
	nb := t.get(pid)
	nb.mu.Lock()
	defer nb.mu.Unlock()
	if nb.isBanned() {
		return errors.New("peer is banned")
	}
	if !nb.recordInWindow(&nb.pubTimes, t.maxPub) {
		return errors.New("publish volume exceeded")
	}
	return nil
}

// RecordGet checks get volume. Returns an error if the peer is enumerating
// the DHT at an abnormal rate.
func (t *NodeBehaviorTracker) RecordGet(pid pp.ID) error {
	nb := t.get(pid)
	nb.mu.Lock()
	defer nb.mu.Unlock()
	if nb.isBanned() {
		return errors.New("peer is banned")
	}
	if !nb.recordInWindow(&nb.getTimes, t.maxGet) {
		return errors.New("get volume exceeded")
	}
	return nil
}

// Cleanup removes the behavior entry for a peer if it is not currently banned.
// Called when the peer is evicted from StreamRecords by the GC.
func (t *NodeBehaviorTracker) Cleanup(pid pp.ID) {
	t.mu.RLock()
	nb := t.nodes[pid]
	t.mu.RUnlock()
	if nb == nil {
		return
	}
	nb.mu.Lock()
	banned := nb.isBanned()
	nb.mu.Unlock()
	if !banned {
		t.mu.Lock()
		delete(t.nodes, pid)
		t.mu.Unlock()
	}
}
@@ -5,70 +5,78 @@ import (
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"oc-discovery/conf"
	"io"
	"math/rand"
	"oc-discovery/daemons/node/common"
	"strings"
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/dbs"
	pp "cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/models/utils"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	lpp "github.com/libp2p/go-libp2p/core/peer"
)

type PeerRecord struct {
	Name          string    `json:"name"`
	DID           string    `json:"did"` // real PEER ID
	PeerID        string    `json:"peer_id"`
	PubKey        []byte    `json:"pub_key"`
	APIUrl        string    `json:"api_url"`
	StreamAddress string    `json:"stream_address"`
	NATSAddress   string    `json:"nats_address"`
	WalletAddress string    `json:"wallet_address"`
	Signature     []byte    `json:"signature"`
	ExpiryDate    time.Time `json:"expiry_date"`

// DefaultTTLSeconds is the default TTL for peer records when the publisher
// does not declare a custom TTL. Exported so the node package can reference it.
const DefaultTTLSeconds = 120

	TTL   int  `json:"ttl"` // max of hop diffusion
	NoPub bool `json:"no_pub"`

// maxTTLSeconds caps how far in the future a publisher can set their ExpiryDate.
const maxTTLSeconds = 86400 // 24h

// tombstoneTTL is how long a signed delete record stays alive in the DHT —
// long enough to propagate everywhere, short enough not to linger forever.
const tombstoneTTL = 10 * time.Minute

type PeerRecordPayload struct {
	ID         string    `json:"id"`
	Name       string    `json:"name"`
	DID        string    `json:"did"`
	PubKey     []byte    `json:"public_key"`
	ExpiryDate time.Time `json:"expiry_date"`
	IsNano     bool      `json:"is_nano"`
	// TTLSeconds is the publisher's declared lifetime for this record in seconds.
	// 0 means "use the default (120 s)". Included in the signed payload so it
	// cannot be altered by an intermediary.
	TTLSeconds int `json:"ttl_seconds,omitempty"`
}

type PeerRecord struct {
	PeerRecordPayload
	CreationDate  time.Time        `json:"creation_date"`
	UpdateDate    time.Time        `json:"update_date"`
	PeerID        string           `json:"peer_id"`
	APIUrl        string           `json:"api_url"`
	StreamAddress string           `json:"stream_address"`
	NATSAddress   string           `json:"nats_address"`
	WalletAddress string           `json:"wallet_address"`
	Location      *pp.PeerLocation `json:"location,omitempty"`
	Signature     []byte           `json:"signature"`
}

func (p *PeerRecord) Sign() error {
	priv, err := common.LoadKeyFromFilePrivate()
	priv, err := tools.LoadKeyFromFilePrivate()
	if err != nil {
		return err
	}
	dht := PeerRecord{
		Name:       p.Name,
		DID:        p.DID,
		PubKey:     p.PubKey,
		ExpiryDate: p.ExpiryDate,
	}
	payload, _ := json.Marshal(dht)
	payload, _ := json.Marshal(p.PeerRecordPayload)
	b, err := common.Sign(priv, payload)
	p.Signature = b
	return err
}

func (p *PeerRecord) Verify() (crypto.PubKey, error) {
	fmt.Println(p.PubKey)
	pubKey, err := crypto.UnmarshalPublicKey(p.PubKey) // retrieve the pub key carried in the message
	if err != nil {
		fmt.Println("UnmarshalPublicKey")
		return pubKey, err
	}
	dht := PeerRecord{
		Name:       p.Name,
		DID:        p.DID,
		PubKey:     p.PubKey,
		ExpiryDate: p.ExpiryDate,
	}
	payload, _ := json.Marshal(dht)
	payload, _ := json.Marshal(p.PeerRecordPayload)

	if ok, _ := common.Verify(pubKey, payload, p.Signature); !ok { // verify the minimal message was signed by pubKey
		fmt.Println("Verify")
	if ok, _ := pubKey.Verify(payload, p.Signature); !ok { // verify the minimal message was signed by pubKey
		return pubKey, errors.New("invalid signature")
	}
	return pubKey, nil
@@ -79,7 +87,6 @@ func (pr *PeerRecord) ExtractPeer(ourkey string, key string, pubKey crypto.PubKe
	if err != nil {
		return false, nil, err
	}
	fmt.Println("ExtractPeer MarshalPublicKey")
	rel := pp.NONE
	if ourkey == key { // at this point the PeerID is the same as ours: this is our own peer info
		rel = pp.SELF
@@ -90,7 +97,6 @@ func (pr *PeerRecord) ExtractPeer(ourkey string, key string, pubKey crypto.PubKe
			UUID: pr.DID,
			Name: pr.Name,
		},
		State:    pp.ONLINE,
		Relation: rel, // VERIFY: it crushes nothing
		PeerID:    pr.PeerID,
		PublicKey: base64.StdEncoding.EncodeToString(pubBytes),
@@ -98,28 +104,53 @@ func (pr *PeerRecord) ExtractPeer(ourkey string, key string, pubKey crypto.PubKe
		StreamAddress: pr.StreamAddress,
		NATSAddress:   pr.NATSAddress,
		WalletAddress: pr.WalletAddress,
		Location:      pr.Location,
	}
	if time.Now().UTC().After(pr.ExpiryDate) { // is expired
		p.State = pp.OFFLINE // then it is considered OFFLINE
	}
	b, err := json.Marshal(p)
	if err != nil {
		return pp.SELF == p.Relation, nil, err
	}
	go tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
		FromApp:  "oc-discovery",
		Datatype: tools.PEER,
		Method:   int(tools.CREATE_PEER),
		Payload:  b,
	})
	if p.State == pp.OFFLINE {
	if time.Now().UTC().After(pr.ExpiryDate) {
		return pp.SELF == p.Relation, nil, errors.New("peer " + key + " is offline")
	}
	return pp.SELF == p.Relation, p, nil
}

// TombstonePayload is the signed body of a delete request.
// Only the owner's private key can produce a valid signature over this payload.
type TombstonePayload struct {
	DID       string    `json:"did"`
	PeerID    string    `json:"peer_id"`
	DeletedAt time.Time `json:"deleted_at"`
}

// TombstoneRecord is stored in the DHT at /node/{DID} to signal that a peer
// has voluntarily left the network. The Tombstone bool field acts as a
// discriminator so validators can distinguish it from a live PeerRecord.
type TombstoneRecord struct {
	TombstonePayload
	PubKey    []byte `json:"pub_key"`
	Tombstone bool   `json:"tombstone"`
	Signature []byte `json:"signature"`
}

func (ts *TombstoneRecord) Verify() (crypto.PubKey, error) {
	pubKey, err := crypto.UnmarshalPublicKey(ts.PubKey)
	if err != nil {
		return nil, err
	}
	payload, _ := json.Marshal(ts.TombstonePayload)
	if ok, _ := pubKey.Verify(payload, ts.Signature); !ok {
		return nil, errors.New("invalid tombstone signature")
	}
	return pubKey, nil
}

// isTombstone returns true if data is a valid, well-formed TombstoneRecord.
func isTombstone(data []byte) bool {
	var ts TombstoneRecord
	return json.Unmarshal(data, &ts) == nil && ts.Tombstone
}
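The signing side of a tombstone does not appear in this hunk; a minimal sketch of how the owning node would produce one (myDID and priv are placeholders for the node's DID and loaded private key; common.Sign is the same helper used by PeerRecord.Sign):

	payload := TombstonePayload{DID: myDID, PeerID: h.ID().String(), DeletedAt: time.Now().UTC()}
	body, _ := json.Marshal(payload)
	sig, err := common.Sign(priv, body) // sign exactly the payload that Verify re-marshals
	if err != nil {
		return err
	}
	pubBytes, _ := crypto.MarshalPublicKey(priv.GetPublic())
	ts := TombstoneRecord{TombstonePayload: payload, PubKey: pubBytes, Tombstone: true, Signature: sig}
	// ts is then encoded onto a ProtocolDelete stream; Verify() above is its mirror.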

type GetValue struct {
	Key string `json:"key"`
	Key    string `json:"key"`
	PeerID string `json:"peer_id,omitempty"`
}

type GetResponse struct {
@@ -127,162 +158,433 @@ type GetResponse struct {
	Records map[string]PeerRecord `json:"records,omitempty"`
}

func (ix *IndexerService) genKey(did string) string {
	return "/node/" + did
}

func (ix *IndexerService) genPIDKey(peerID string) string {
	return "/pid/" + peerID
}

// isPeerKnown is the stream-level gate: returns true if pid is allowed.
// Check order (fast → slow):
//  1. In-memory stream records — currently heartbeating to this indexer.
//  2. Local DB by peer_id — known peer, blacklist enforced here.
//  3. DHT /pid/{peerID} → /node/{DID} — registered on any indexer.
//
// ProtocolHeartbeat and ProtocolPublish handlers do NOT call this — they are
// the streams through which a node first makes itself known.
func (ix *IndexerService) isPeerKnown(pid lpp.ID) bool {
	// 1. Fast path: active heartbeat session.
	ix.StreamMU.RLock()
	_, active := ix.StreamRecords[common.ProtocolHeartbeat][pid]
	ix.StreamMU.RUnlock()
	if active {
		return true
	}
	// 2. Local DB: known peer (handles blacklist).
	access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
	results := access.Search(&dbs.Filters{
		And: map[string][]dbs.Filter{
			"peer_id": {{Operator: dbs.EQUAL.String(), Value: pid.String()}},
		},
	}, pid.String(), false, 0, 1)
	for _, item := range results.Data {
		p, ok := item.(*pp.Peer)
		if !ok || p.PeerID != pid.String() {
			continue
		}
		return p.Relation != pp.BLACKLIST
	}
	// 3. DHT lookup by peer_id.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	did, err := ix.DHT.GetValue(ctx, ix.genPIDKey(pid.String()))
	cancel()
	if err != nil || len(did) == 0 {
		return false
	}
	ctx2, cancel2 := context.WithTimeout(context.Background(), 3*time.Second)
	val, err := ix.DHT.GetValue(ctx2, ix.genKey(string(did)))
	cancel2()
	return err == nil && !isTombstone(val)
}

func (ix *IndexerService) initNodeHandler() {
	ix.Host.SetStreamHandler(common.ProtocolHeartbeat, ix.HandleNodeHeartbeat)
	logger := oclib.GetLogger()
	logger.Info().Msg("Init Node Handler")

	// Each heartbeat from a node carries a freshly signed PeerRecord.
	// Republish it to the DHT so the record never expires as long as the node
	// is alive — no separate publish stream needed from the node side.
	ix.AfterHeartbeat = func(hb *common.Heartbeat) {
		// Priority 1: use the fresh signed PeerRecord embedded in the heartbeat.
		// Each heartbeat tick, the node re-signs with ExpiryDate = now+2min, so
		// this record is always fresh. Fetching from DHT would give a stale expiry.
		var rec PeerRecord
		if len(hb.Record) > 0 {
			if err := json.Unmarshal(hb.Record, &rec); err != nil {
				logger.Warn().Err(err).Msg("indexer: heartbeat embedded record unmarshal failed")
				return
			}
		} else {
			// Fallback: node didn't embed a record yet (first heartbeat before claimInfo).
			// Fetch from DHT using the DID resolved by HandleHeartbeat.
			ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
			res, err := ix.DHT.GetValue(ctx2, ix.genKey(hb.DID))
			cancel2()
			if err != nil {
				logger.Warn().Err(err).Str("did", hb.DID).Msg("indexer: DHT fetch for refresh failed")
				return
			}
			if err := json.Unmarshal(res, &rec); err != nil {
				logger.Warn().Err(err).Str("did", hb.DID).Msg("indexer: heartbeat record unmarshal failed")
				return
			}
		}
		if _, err := rec.Verify(); err != nil {
			logger.Warn().Err(err).Str("did", rec.DID).Msg("indexer: heartbeat record signature invalid")
			return
		}
		// Don't republish if a tombstone was recently stored for this DID:
		// the peer explicitly left and we must not re-animate their record.
		ix.deletedDIDsMu.Lock()
		if t, ok := ix.deletedDIDs[rec.DID]; ok {
			if time.Since(t) < tombstoneTTL {
				ix.deletedDIDsMu.Unlock()
				return
			}
			// tombstoneTTL elapsed — peer is allowed to re-register.
			delete(ix.deletedDIDs, rec.DID)
		}
		ix.deletedDIDsMu.Unlock()
		// Keep StreamRecord.Record in sync so BuildHeartbeatResponse always
		// sees a populated PeerRecord (Name, DID, etc.) regardless of whether
		// handleNodePublish ran before or after the heartbeat stream was opened.
		if pid, err := lpp.Decode(rec.PeerID); err == nil {
			ix.StreamMU.Lock()
			if srec, ok := ix.StreamRecords[common.ProtocolHeartbeat][pid]; ok {
				srec.Record = rec
			}
			ix.StreamMU.Unlock()
		}
		data, err := json.Marshal(rec)
		if err != nil {
			return
		}
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		if err := ix.DHT.PutValue(ctx, ix.genKey(rec.DID), data); err != nil {
			logger.Warn().Err(err).Str("did", rec.DID).Msg("indexer: DHT refresh /node/ failed")
		}
		cancel()
		// /pid/ is written unconditionally — the gater queries by PeerID and this
		// index must stay fresh regardless of whether the /node/ write succeeded.
		if rec.PeerID != "" {
			ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
			if err := ix.DHT.PutValue(ctx2, ix.genPIDKey(rec.PeerID), []byte(rec.DID)); err != nil {
				logger.Warn().Err(err).Str("pid", rec.PeerID).Msg("indexer: DHT refresh /pid/ failed")
			}
			cancel2()
		}
	}
	ix.Host.SetStreamHandler(common.ProtocolHeartbeat, ix.HandleHeartbeat)
	ix.Host.SetStreamHandler(common.ProtocolPublish, ix.handleNodePublish)
	ix.Host.SetStreamHandler(common.ProtocolGet, ix.handleNodeGet)
	ix.Host.SetStreamHandler(common.ProtocolDelete, ix.handleNodeDelete)
	ix.Host.SetStreamHandler(common.ProtocolIndirectProbe, ix.handleIndirectProbe)
	ix.Host.SetStreamHandler(common.ProtocolIndexerCandidates, ix.handleCandidateRequest)
	ix.initSearchHandlers()
}

// handleCandidateRequest responds to a node's consensus candidate request.
// Returns a random sample of indexers from the local DHT cache.
func (ix *IndexerService) handleCandidateRequest(s network.Stream) {
	defer s.Close()
	if !ix.isPeerKnown(s.Conn().RemotePeer()) {
		logger := oclib.GetLogger()
		logger.Warn().Str("peer", s.Conn().RemotePeer().String()).Msg("[candidates] unknown peer, rejecting stream")
		s.Reset()
		return
	}
	s.SetDeadline(time.Now().Add(5 * time.Second))
	var req common.IndexerCandidatesRequest
	if err := json.NewDecoder(s).Decode(&req); err != nil {
		return
	}
	if req.Count <= 0 || req.Count > 10 {
		req.Count = 3
	}
	ix.dhtCacheMu.RLock()
	cache := make([]dhtCacheEntry, len(ix.dhtCache))
	copy(cache, ix.dhtCache)
	ix.dhtCacheMu.RUnlock()

	// Shuffle for randomness: each voter offers a different subset.
	rand.Shuffle(len(cache), func(i, j int) { cache[i], cache[j] = cache[j], cache[i] })
	candidates := make([]lpp.AddrInfo, 0, req.Count)
	for _, e := range cache {
		if len(candidates) >= req.Count {
			break
		}
		candidates = append(candidates, e.AI)
	}
	json.NewEncoder(s).Encode(common.IndexerCandidatesResponse{Candidates: candidates})
}

func (ix *IndexerService) handleNodePublish(s network.Stream) {
	defer s.Close()
	logger := oclib.GetLogger()
	remotePeer := s.Conn().RemotePeer()
	if err := ix.behavior.RecordPublish(remotePeer); err != nil {
		logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("publish refused")
		s.Reset()
		return
	}
	for {
		var rec PeerRecord
		if err := json.NewDecoder(s).Decode(&rec); err != nil {
			continue
		}
		rec2 := PeerRecord{
			Name:   rec.Name,
			DID:    rec.DID, // REAL PEER ID
			PubKey: rec.PubKey,
			PeerID: rec.PeerID,
		}
		if _, err := rec2.Verify(); err != nil {
			logger.Err(err)
			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
				strings.Contains(err.Error(), "reset") ||
				strings.Contains(err.Error(), "closed") ||
				strings.Contains(err.Error(), "too many connections") {
				return
			}
			continue
		}
		if rec.PeerID == "" || rec.ExpiryDate.Before(time.Now().UTC()) { // already expired
			logger.Warn().Msg(rec.PeerID + " is expired.")
			continue
		if _, err := rec.Verify(); err != nil {
			ix.behavior.RecordBadSignature(remotePeer)
			logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("bad signature on publish")
			return
		}
		pid, err := peer.Decode(rec.PeerID)
		if err := ix.behavior.CheckIdentity(remotePeer, rec.DID); err != nil {
			logger.Warn().Err(err).Msg("identity mismatch on publish")
			s.Reset()
			return
		}
		if rec.PeerID == "" || rec.ExpiryDate.Before(time.Now().UTC()) {
			logger.Err(errors.New(rec.PeerID + " is expired."))
			return
		}
		pid, err := lpp.Decode(rec.PeerID)
		if err != nil {
			continue
			return
		}

		ix.StreamMU.Lock()

		if ix.StreamRecords[common.ProtocolPublish] == nil {
			ix.StreamRecords[common.ProtocolPublish] = map[peer.ID]*common.StreamRecord[PeerRecord]{}
		defer ix.StreamMU.Unlock()
		if ix.StreamRecords[common.ProtocolHeartbeat] == nil {
			ix.StreamRecords[common.ProtocolHeartbeat] = map[lpp.ID]*common.StreamRecord[PeerRecord]{}
		}
		streams := ix.StreamRecords[common.ProtocolPublish]

		streams := ix.StreamRecords[common.ProtocolHeartbeat]
		if srec, ok := streams[pid]; ok {
			fmt.Println("UPDATE PUBLISH", pid)
			srec.DID = rec.DID
			srec.Record = rec
			srec.LastSeen = time.Now().UTC()
		} else {
			fmt.Println("CREATE PUBLISH", pid)
			streams[pid] = &common.StreamRecord[PeerRecord]{ // HeartBeat wil
				DID:      rec.DID,
				Record:   rec,
				LastSeen: time.Now().UTC(),
			}
			srec.HeartbeatStream.UptimeTracker.LastSeen = time.Now().UTC()
		}

		if ix.LongLivedPubSubs[common.TopicPubSubNodeActivity] != nil && !rec.NoPub {
			ad, err := peer.AddrInfoFromString("/ip4/" + conf.GetConfig().Hostname + " /tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + " /p2p/" + ix.Host.ID().String())
			if err == nil {
				if b, err := json.Marshal(common.TopicNodeActivityPub{
					Disposer:     *ad,
					DID:          rec.DID,
					Name:         rec.Name,
					PeerID:       pid.String(),
					NodeActivity: pp.ONLINE,
				}); err == nil {
					ix.LongLivedPubSubs[common.TopicPubSubNodeActivity].Publish(context.Background(), b)
				}
			}
		key := ix.genKey(rec.DID)
		data, err := json.Marshal(rec)
		if err != nil {
			logger.Err(err)
			return
		}
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		if err := ix.DHT.PutValue(ctx, key, data); err != nil {
			logger.Err(err)
			cancel()
			return
		}
		cancel()

		if rec.TTL > 0 {
			rec.NoPub = true
			for _, ad := range common.StaticIndexers {
				if ad.ID == s.Conn().RemotePeer() {
					continue
				}
				if common.StreamIndexers[common.ProtocolPublish][ad.ID] == nil {
					continue
				}
				stream := common.StreamIndexers[common.ProtocolPublish][ad.ID]
				rec.TTL -= 1
				if err := json.NewEncoder(stream.Stream).Encode(&rec); err != nil { // then publish on stream
					continue
				}
		// Secondary index: /pid/<peerID> → DID, so peers can resolve by libp2p PeerID.
		if rec.PeerID != "" {
			ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
			if err := ix.DHT.PutValue(ctx2, ix.genPIDKey(rec.PeerID), []byte(rec.DID)); err != nil {
				logger.Err(err).Str("pid", rec.PeerID).Msg("indexer: failed to write pid index")
			}
			cancel2()
		}
		ix.StreamMU.Unlock()
		return
	}
}

func (ix *IndexerService) handleNodeGet(s network.Stream) {
	defer s.Close()
	logger := oclib.GetLogger()
	remotePeer := s.Conn().RemotePeer()
	if !ix.isPeerKnown(remotePeer) {
		logger.Warn().Str("peer", remotePeer.String()).Msg("[get] unknown peer, rejecting stream")
		s.Reset()
		return
	}
	if err := ix.behavior.RecordGet(remotePeer); err != nil {
		logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("get refused")
		s.Reset()
		return
	}
	for {
		var req GetValue
		if err := json.NewDecoder(s).Decode(&req); err != nil {
			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
				strings.Contains(err.Error(), "reset") ||
				strings.Contains(err.Error(), "closed") ||
				strings.Contains(err.Error(), "too many connections") {
				return
			}
			logger.Err(err)
			continue
		}
		ix.StreamMU.Lock()

		if ix.StreamRecords[common.ProtocolGet] == nil {
			ix.StreamRecords[common.ProtocolGet] = map[peer.ID]*common.StreamRecord[PeerRecord]{}
		resp := GetResponse{Found: false, Records: map[string]PeerRecord{}}

		// Resolve DID key: by PeerID (secondary /pid/ index) or direct DID key.
		var key string
		if req.PeerID != "" {
			pidCtx, pidCancel := context.WithTimeout(context.Background(), 5*time.Second)
			did, err := ix.DHT.GetValue(pidCtx, ix.genPIDKey(req.PeerID))
			pidCancel()
			if err == nil {
				key = string(did)
			}
		} else {
			key = req.Key
		}
		resp := GetResponse{
			Found:   false,
			Records: map[string]PeerRecord{},
		}
		streams := ix.StreamRecords[common.ProtocolPublish]
		// simple lookup by PeerID (or DID)
		for _, rec := range streams {
			if rec.Record.DID == req.Key || rec.Record.PeerID == req.Key || rec.Record.Name == req.Key { // OK
				resp.Found = true
				resp.Records[rec.Record.PeerID] = rec.Record
				if rec.Record.DID == req.Key || rec.Record.PeerID == req.Key { // these are unique, no need to search further
					_ = json.NewEncoder(s).Encode(resp)
					ix.StreamMU.Unlock()
					return

		if key != "" {
			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
			c, err := ix.DHT.GetValue(ctx, ix.genKey(key))
			cancel()
			if err == nil && !isTombstone(c) {
				var rec PeerRecord
				if json.Unmarshal(c, &rec) == nil {
					resp.Records[rec.PeerID] = rec
				}
				continue
			} else if err != nil {
				logger.Err(err).Msg("Failed to fetch PeerRecord from DHT " + key)
			}
		}
		// if not found, ask my neighbour indexers
		for pid, dsp := range ix.DisposedPeers {
			if _, ok := resp.Records[dsp.PeerID]; !ok && (dsp.Name == req.Key || dsp.DID == req.Key || dsp.PeerID == req.Key) {
				ctxTTL, cancelTTL := context.WithTimeout(context.Background(), 120*time.Second)
				defer cancelTTL()
				if ix.Host.Network().Connectedness(pid) != network.Connected {
					_ = ix.Host.Connect(ctxTTL, dsp.Disposer)
					str, err := ix.Host.NewStream(ctxTTL, pid, common.ProtocolGet)
					if err != nil {
						continue
					}
					for {
						if ctxTTL.Err() == context.DeadlineExceeded {
							break
						}
						var subResp GetResponse
						if err := json.NewDecoder(str).Decode(&subResp); err != nil {
							continue
						}
						if subResp.Found {
							for k, v := range subResp.Records {
								if _, ok := resp.Records[k]; !ok {
									resp.Records[k] = v
								}
							}
							break
						}
					}
				}
			}
		}
		// Not found

		resp.Found = len(resp.Records) > 0
		_ = json.NewEncoder(s).Encode(resp)
		ix.StreamMU.Unlock()
		break
	}
}

// handleNodeDelete processes a signed delete (tombstone) request from a peer.
// It verifies that the request is:
//   - marked as a tombstone
//   - recent (within 5 minutes, preventing replay attacks)
//   - sent by the actual peer whose record is being deleted (PeerID == remotePeer)
//   - signed by the matching private key
//
// On success it stores the tombstone in the DHT, evicts the peer from the local
// stream records, and marks the DID in deletedDIDs so AfterHeartbeat cannot
// accidentally republish the record during the tombstoneTTL window.
func (ix *IndexerService) handleNodeDelete(s network.Stream) {
	defer s.Close()
	logger := oclib.GetLogger()
	remotePeer := s.Conn().RemotePeer()
	s.SetDeadline(time.Now().Add(10 * time.Second))

	var ts TombstoneRecord
	if err := json.NewDecoder(s).Decode(&ts); err != nil || !ts.Tombstone {
		s.Reset()
		return
	}
	if ts.PeerID == "" || ts.DID == "" {
		s.Reset()
		return
	}
	if time.Since(ts.DeletedAt) > 5*time.Minute {
		logger.Warn().Str("peer", remotePeer.String()).Msg("[delete] stale tombstone rejected")
		s.Reset()
		return
	}
	if ts.PeerID != remotePeer.String() {
		logger.Warn().Str("peer", remotePeer.String()).Msg("[delete] tombstone PeerID mismatch")
		s.Reset()
		return
	}
	if _, err := ts.Verify(); err != nil {
		logger.Warn().Err(err).Str("peer", remotePeer.String()).Msg("[delete] invalid tombstone signature")
		s.Reset()
		return
	}

	// Mark DID as deleted in-memory before writing to DHT so AfterHeartbeat
	// cannot win a race and republish the live record on top of the tombstone.
	ix.deletedDIDsMu.Lock()
	ix.deletedDIDs[ts.DID] = ts.DeletedAt
	ix.deletedDIDsMu.Unlock()

	data, _ := json.Marshal(ts)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	if err := ix.DHT.PutValue(ctx, ix.genKey(ts.DID), data); err != nil {
		logger.Warn().Err(err).Str("did", ts.DID).Msg("[delete] DHT write tombstone failed")
	}
	cancel()

	// Invalidate the /pid/ secondary index so isPeerKnown returns false quickly.
	ctx2, cancel2 := context.WithTimeout(context.Background(), 10*time.Second)
	if err := ix.DHT.PutValue(ctx2, ix.genPIDKey(ts.PeerID), []byte("")); err != nil {
		logger.Warn().Err(err).Str("pid", ts.PeerID).Msg("[delete] DHT clear pid failed")
	}
	cancel2()

	// Evict from active stream records.
	if pid, err := lpp.Decode(ts.PeerID); err == nil {
		ix.StreamMU.Lock()
		delete(ix.StreamRecords[common.ProtocolHeartbeat], pid)
		ix.StreamMU.Unlock()
	}

	logger.Info().Str("did", ts.DID).Str("peer", ts.PeerID).Msg("[delete] tombstone stored, peer evicted")
}

// handleIndirectProbe is the SWIM inter-indexer probe handler.
// A node opens this stream toward a live indexer to ask: "can you reach peer X?"
// The indexer attempts a ProtocolBandwidthProbe to X and reports back.
// This is the only protocol that indexers use to communicate with each other;
// no persistent inter-indexer connections are maintained.
func (ix *IndexerService) handleIndirectProbe(s network.Stream) {
	defer s.Close()
	s.SetDeadline(time.Now().Add(10 * time.Second))

	var req common.IndirectProbeRequest
	if err := json.NewDecoder(s).Decode(&req); err != nil {
		s.Reset()
		return
	}

	respond := func(reachable bool, latencyMs int64) {
		json.NewEncoder(s).Encode(common.IndirectProbeResponse{
			Reachable: reachable,
			LatencyMs: latencyMs,
		})
	}

	// Connect to target if not already connected.
	ctx, cancel := context.WithTimeout(context.Background(), 6*time.Second)
	defer cancel()
	if ix.Host.Network().Connectedness(req.Target.ID) != network.Connected {
		if err := ix.Host.Connect(ctx, req.Target); err != nil {
			respond(false, 0)
			return
		}
	}

	// Open a bandwidth probe stream — already registered on all nodes/indexers.
	start := time.Now()
	ps, err := ix.Host.NewStream(ctx, req.Target.ID, common.ProtocolBandwidthProbe)
	if err != nil {
		respond(false, 0)
		return
	}
	defer ps.Reset()
	ps.SetDeadline(time.Now().Add(3 * time.Second))
	ps.Write([]byte("ping"))
	buf := make([]byte, 4)
	_, err = ps.Read(buf)
	latency := time.Since(start).Milliseconds()
	respond(err == nil, latency)
}
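The node side of this exchange is not part of this diff; a sketch of what a caller could look like, under the assumption that nodes dial common.ProtocolIndirectProbe exactly as registered above (lpp is the libp2p peer alias used in this file):

	func askIndirectProbe(h host.Host, indexer, target lpp.AddrInfo) (bool, int64, error) {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		defer cancel()
		s, err := h.NewStream(ctx, indexer.ID, common.ProtocolIndirectProbe)
		if err != nil {
			return false, 0, err
		}
		defer s.Close()
		s.SetDeadline(time.Now().Add(10 * time.Second))
		if err := json.NewEncoder(s).Encode(common.IndirectProbeRequest{Target: target}); err != nil {
			return false, 0, err
		}
		var resp common.IndirectProbeResponse
		if err := json.NewDecoder(s).Decode(&resp); err != nil {
			return false, 0, err
		}
		return resp.Reachable, resp.LatencyMs, nil
	}

This mirrors the SWIM pattern: before declaring a peer dead, ask a third party whether it can still reach it.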

248
daemons/node/indexer/search.go
Normal file
@@ -0,0 +1,248 @@
|
||||
package indexer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"oc-discovery/conf"
|
||||
"oc-discovery/daemons/node/common"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"github.com/libp2p/go-libp2p/core/network"
|
||||
pp "github.com/libp2p/go-libp2p/core/peer"
|
||||
)
|
||||
|
||||
const TopicSearchPeer = "oc-search-peer"
|
||||
|
||||
// searchTimeout returns the configured search timeout, defaulting to 5s.
|
||||
func searchTimeout() time.Duration {
|
||||
if t := conf.GetConfig().SearchTimeout; t > 0 {
|
||||
return time.Duration(t) * time.Second
|
||||
}
|
||||
return 5 * time.Second
|
||||
}
|
||||
|
||||
// initSearchHandlers registers ProtocolSearchPeer and ProtocolSearchPeerResponse
|
||||
// and subscribes to TopicSearchPeer on GossipSub.
|
||||
func (ix *IndexerService) initSearchHandlers() {
|
||||
ix.Host.SetStreamHandler(common.ProtocolSearchPeer, ix.handleSearchPeer)
|
||||
ix.Host.SetStreamHandler(common.ProtocolSearchPeerResponse, ix.handleSearchPeerResponse)
|
||||
ix.initSearchSubscription()
|
||||
}
|
||||
|
||||
// updateReferent is called from HandleHeartbeat when Referent flag changes.
|
||||
// If referent=true the node is added to referencedNodes; if false it is removed.
|
||||
func (ix *IndexerService) updateReferent(pid pp.ID, rec PeerRecord, referent bool) {
|
||||
ix.referencedNodesMu.Lock()
|
||||
defer ix.referencedNodesMu.Unlock()
|
||||
if referent {
|
||||
ix.referencedNodes[pid] = rec
|
||||
} else {
|
||||
delete(ix.referencedNodes, pid)
|
||||
}
|
||||
}
|
||||
|
||||
// searchReferenced looks up nodes in referencedNodes matching the query.
|
||||
// Matches on peerID (exact), DID (exact), or name (case-insensitive contains).
|
||||
func (ix *IndexerService) searchReferenced(peerID, did, name string) []PeerRecord {
|
||||
ix.referencedNodesMu.RLock()
|
||||
defer ix.referencedNodesMu.RUnlock()
|
||||
nameLow := strings.ToLower(name)
|
||||
var hits []PeerRecord
|
||||
for pid, rec := range ix.referencedNodes {
|
||||
pidStr := pid.String()
|
||||
matchPeerID := peerID != "" && pidStr == peerID
|
||||
matchDID := did != "" && rec.DID == did
|
||||
matchName := name != "" && strings.Contains(strings.ToLower(rec.Name), nameLow)
|
||||
if matchPeerID || matchDID || matchName {
|
||||
rec.ID = rec.DID
|
||||
hits = append(hits, rec)
|
||||
}
|
||||
}
|
||||
return hits
|
||||
}

type SearchPeerResult struct {
	QueryID string       `json:"query_id"`
	Records []PeerRecord `json:"records"`
}

// handleSearchPeer is the ProtocolSearchPeer handler.
// The node opens this stream, sends a SearchPeerRequest, and reads results
// as they stream in. The stream stays open until timeout or the node closes it.
func (ix *IndexerService) handleSearchPeer(s network.Stream) {
	fmt.Println("handleSearchPeer")
	logger := oclib.GetLogger()
	defer s.Reset()

	if !ix.isPeerKnown(s.Conn().RemotePeer()) {
		logger.Warn().Str("peer", s.Conn().RemotePeer().String()).Msg("[search] unknown peer, rejecting stream")
		return
	}
	fmt.Println("SearchPeerRequest")
	var req common.SearchPeerRequest
	if err := json.NewDecoder(s).Decode(&req); err != nil || req.QueryID == "" {
		return
	}

	// streamCtx is cancelled when the node closes its end of the stream.
	streamCtx, streamCancel := context.WithCancel(context.Background())
	go func() {
		// Block until the stream is reset/closed, then cancel our context.
		buf := make([]byte, 1)
		s.Read(buf) //nolint:errcheck — we only care about EOF/reset
		streamCancel()
	}()
	defer streamCancel()

	resultCh := make(chan []PeerRecord, 16)
	ix.pendingSearchesMu.Lock()
	ix.pendingSearches[req.QueryID] = resultCh
	ix.pendingSearchesMu.Unlock()
	defer func() {
		ix.pendingSearchesMu.Lock()
		delete(ix.pendingSearches, req.QueryID)
		ix.pendingSearchesMu.Unlock()
	}()

	// Check own referencedNodes immediately.
	if hits := ix.searchReferenced(req.PeerID, req.DID, req.Name); len(hits) > 0 {
		fmt.Println("hits", hits)
		resultCh <- hits
	}
	fmt.Println("publishSearchQuery")
	// Broadcast the search on GossipSub so other indexers can respond.
	ix.publishSearchQuery(req.QueryID, req.PeerID, req.DID, req.Name)

	// Stream results back to the node as they arrive; reset the idle timer on each result.
	enc := json.NewEncoder(s)
	idleTimer := time.NewTimer(searchTimeout())
	defer idleTimer.Stop()
	for {
		select {
		case hits := <-resultCh:
			fmt.Println("resultCh hits", hits)
			if err := enc.Encode(SearchPeerResult{QueryID: req.QueryID, Records: hits}); err != nil {
				logger.Debug().Err(err).Msg("[search] stream write failed")
				return
			}
			// Reset idle timeout: keep alive as long as results trickle in.
			if !idleTimer.Stop() {
				select {
				case <-idleTimer.C:
				default:
				}
			}
			idleTimer.Reset(searchTimeout())
		case <-idleTimer.C:
			// No new result within timeout — close gracefully.
			return
		case <-streamCtx.Done():
			// Node closed the stream (a new search superseded this one).
			return
		}
	}
}
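
The handler above implies a simple client contract: open ProtocolSearchPeer, send one SearchPeerRequest, then decode SearchPeerResult frames until the stream closes. A sketch of what such a client could look like, illustrative only: it assumes a host already connected to the indexer plus the github.com/libp2p/go-libp2p/core/host import, and it is not the repository's actual node-side client.

func runPeerSearch(ctx context.Context, h host.Host, indexerID pp.ID,
	req common.SearchPeerRequest, onHits func([]PeerRecord)) error {
	s, err := h.NewStream(ctx, indexerID, common.ProtocolSearchPeer)
	if err != nil {
		return err
	}
	// Closing our end is what fires the indexer's streamCtx cancellation.
	defer s.Reset()
	if err := json.NewEncoder(s).Encode(&req); err != nil {
		return err
	}
	dec := json.NewDecoder(s)
	for {
		var res SearchPeerResult
		if err := dec.Decode(&res); err != nil {
			return nil // EOF or reset: the indexer hit its idle timeout
		}
		if res.QueryID == req.QueryID {
			onHits(res.Records)
		}
	}
}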

// handleSearchPeerResponse is the ProtocolSearchPeerResponse handler.
// Another indexer opens this stream to deliver hits for a pending queryID.
func (ix *IndexerService) handleSearchPeerResponse(s network.Stream) {
	defer s.Reset()
	fmt.Println("RECEIVED SEARCH")
	var result SearchPeerResult
	if err := json.NewDecoder(s).Decode(&result); err != nil || result.QueryID == "" {
		return
	}
	ix.pendingSearchesMu.Lock()
	ch := ix.pendingSearches[result.QueryID]
	ix.pendingSearchesMu.Unlock()
	fmt.Println("RECEIVED", result.QueryID, ch) // print the snapshot ch: reading ix.pendingSearches here would race with the mutex above
	if ch != nil {
		select {
		case ch <- result.Records:
		default: // channel full, drop — node may be slow
		}
	}
}

// publishSearchQuery broadcasts a SearchQuery on TopicSearchPeer.
func (ix *IndexerService) publishSearchQuery(queryID, peerID, did, name string) {
	ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.RLock()
	topic := ix.LongLivedStreamRecordedService.LongLivedPubSubService.LongLivedPubSubs[TopicSearchPeer]
	ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.RUnlock()
	if topic == nil {
		return
	}
	q := common.SearchQuery{
		QueryID:   queryID,
		PeerID:    peerID,
		DID:       did,
		Name:      name,
		EmitterID: ix.Host.ID().String(),
	}
	b, err := json.Marshal(q)
	if err != nil {
		return
	}
	_ = topic.Publish(context.Background(), b)
}

// initSearchSubscription joins TopicSearchPeer and dispatches incoming queries.
func (ix *IndexerService) initSearchSubscription() {
	logger := oclib.GetLogger()
	ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Lock()
	topic, err := ix.PS.Join(TopicSearchPeer)
	if err != nil {
		ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Unlock()
		logger.Err(err).Msg("[search] failed to join search topic")
		return
	}
	ix.LongLivedStreamRecordedService.LongLivedPubSubService.LongLivedPubSubs[TopicSearchPeer] = topic
	ix.LongLivedStreamRecordedService.LongLivedPubSubService.PubsubMu.Unlock()

	common.SubscribeEvents(
		ix.LongLivedStreamRecordedService.LongLivedPubSubService,
		context.Background(),
		TopicSearchPeer,
		-1,
		func(_ context.Context, q common.SearchQuery, _ string) {
			ix.onSearchQuery(q)
		},
	)
}

// onSearchQuery handles an incoming GossipSub search broadcast.
// If we have matching referencedNodes, we respond to the emitting indexer.
func (ix *IndexerService) onSearchQuery(q common.SearchQuery) {
	// Don't respond to our own broadcasts.
	if q.EmitterID == ix.Host.ID().String() {
		return
	}
	fmt.Println("ON SEARCH QUERY")
	hits := ix.searchReferenced(q.PeerID, q.DID, q.Name)
	fmt.Println("ON SEARCH QUERY HITS", hits)
	if len(hits) == 0 {
		return
	}
	emitterID, err := pp.Decode(q.EmitterID)
	if err != nil {
		fmt.Println("ON SEARCH QUERY err DECODE", err)
		return
	}
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	s, err := ix.Host.NewStream(ctx, emitterID, common.ProtocolSearchPeerResponse)
	if err != nil {
		fmt.Println("ON SEARCH QUERY err NewStream", emitterID, err)
		return
	}
	fmt.Println("ON ", emitterID)
	defer s.Close()
	s.SetDeadline(time.Now().Add(5 * time.Second))
	err = json.NewEncoder(s).Encode(SearchPeerResult{QueryID: q.QueryID, Records: hits})
	fmt.Println("SEARCH ERR", err)
	s.CloseWrite()
}
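
The fan-out above rests on three go-libp2p-pubsub calls: Join, Publish, and Subscribe/Next. A minimal, self-contained sketch of that cycle on a single host; a hypothetical standalone program, not the repository's wiring (by default GossipSub also delivers a locally published message to the local subscription, which is what makes a one-host demo possible):

package main

import (
	"context"
	"fmt"

	"github.com/libp2p/go-libp2p"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
)

func main() {
	ctx := context.Background()
	h, err := libp2p.New()
	if err != nil {
		panic(err)
	}
	ps, err := pubsub.NewGossipSub(ctx, h)
	if err != nil {
		panic(err)
	}
	topic, err := ps.Join("oc-search-peer")
	if err != nil {
		panic(err)
	}
	sub, err := topic.Subscribe()
	if err != nil {
		panic(err)
	}
	// Publisher side: broadcast a query (normally a JSON-encoded SearchQuery).
	_ = topic.Publish(ctx, []byte(`{"query_id":"q1","name":"lyon"}`))
	// Subscriber side: receive it (our own message, since there is one host).
	msg, err := sub.Next(ctx)
	if err == nil {
		fmt.Printf("from %s: %s\n", msg.ReceivedFrom, msg.Data)
	}
}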
@@ -2,69 +2,570 @@ package indexer
import (
	"context"
	"encoding/json"
	"errors"
	"math/rand"
	"oc-discovery/conf"
	"oc-discovery/daemons/node/common"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	pp "cloud.o-forge.io/core/oc-lib/models/peer"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	record "github.com/libp2p/go-libp2p-record"
	"github.com/libp2p/go-libp2p/core/host"
	"github.com/libp2p/go-libp2p/core/peer"
	pp "github.com/libp2p/go-libp2p/core/peer"
)

// Index Record is the model for the specialized registry of node connected to Indexer
type IndexerService struct {
	*common.LongLivedStreamRecordedService[PeerRecord]
	PS              *pubsub.PubSub
	isStrictIndexer bool
	mu              sync.RWMutex
	DisposedPeers   map[peer.ID]*common.TopicNodeActivityPub

// dhtCacheEntry holds one indexer discovered via DHT for use in suggestion responses.
type dhtCacheEntry struct {
	AI       pp.AddrInfo
	LastSeen time.Time
}

// if a pubsub is given... indexer is also an active oc-node. If not... your a strict indexer
// offloadState tracks which nodes we've already proposed migration to.
// When an indexer is overloaded (fill rate > offloadThreshold) it only sends
// SuggestMigrate to a small batch at a time; peers that don't migrate within
// offloadGracePeriod are moved to alreadyTried so a new batch can be picked.
type offloadState struct {
	inBatch      map[pp.ID]time.Time // peer → time added to current batch
	alreadyTried map[pp.ID]struct{}  // peers proposed to that didn't migrate
	mu           sync.Mutex
}

const (
	offloadThreshold   = 0.80 // fill rate above which to start offloading
	offloadBatchSize   = 5    // max concurrent "please migrate" proposals
	offloadGracePeriod = 3 * common.RecommendedHeartbeatInterval
)
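
Condensed into one standalone helper, the batch cycle described above looks like this. This is a readability sketch, not part of the commit: BuildHeartbeatResponse further down inlines the same steps.

func shouldProposeMigration(o *offloadState, pid pp.ID, now time.Time, pool int) bool {
	o.mu.Lock()
	defer o.mu.Unlock()
	// Expire stale proposals: peers that didn't migrate within the grace period.
	for p, t := range o.inBatch {
		if now.Sub(t) > offloadGracePeriod {
			o.alreadyTried[p] = struct{}{}
			delete(o.inBatch, p)
		}
	}
	// Everyone has been tried: start over with a fresh pool.
	if len(o.alreadyTried) >= pool {
		o.alreadyTried = make(map[pp.ID]struct{})
	}
	if _, tried := o.alreadyTried[pid]; tried {
		return false
	}
	if _, in := o.inBatch[pid]; in {
		return true // already in the current batch: keep suggesting
	}
	if len(o.inBatch) < offloadBatchSize {
		o.inBatch[pid] = now
		return true
	}
	return false
}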

// IndexerService manages the indexer node's state: stream records, DHT, pubsub.
type IndexerService struct {
	*common.LongLivedStreamRecordedService[PeerRecord]
	PS               *pubsub.PubSub
	DHT              *dht.IpfsDHT
	isStrictIndexer  bool
	mu               sync.RWMutex
	dhtProvideCancel context.CancelFunc
	bornAt           time.Time
	// Passive DHT cache: refreshed every 2 min in background, used for suggestions.
	dhtCache   []dhtCacheEntry
	dhtCacheMu sync.RWMutex
	// Offload state for overloaded-indexer migration proposals.
	offload offloadState
	// referencedNodes holds nodes that have designated this indexer as their
	// search referent (Heartbeat.Referent=true). Used for distributed search.
	referencedNodes   map[pp.ID]PeerRecord
	referencedNodesMu sync.RWMutex
	// pendingSearches maps queryID → result channel for in-flight searches.
	pendingSearches   map[string]chan []PeerRecord
	pendingSearchesMu sync.Mutex
	// behavior tracks per-node compliance (heartbeat rate, publish/get volume,
	// identity consistency, signature failures).
	behavior *NodeBehaviorTracker
	// connGuard limits new-connection bursts to protect public indexers.
	// deletedDIDs tracks recently tombstoned DIDs to prevent AfterHeartbeat
	// from republishing records that were explicitly deleted by the peer.
	// Entries are cleared automatically after tombstoneTTL.
	deletedDIDs   map[string]time.Time
	deletedDIDsMu sync.RWMutex
	// SWIM incarnation: incremented when a connecting node signals suspicion via
	// SuspectedIncarnation. The new value is broadcast back so nodes can clear
	// their suspect state (refutation mechanism).
	incarnation atomic.Uint64
	// eventQueue holds SWIM membership events to be piggybacked on responses
	// (infection-style dissemination toward connected nodes).
	eventQueue *common.MembershipEventQueue
}

// NewIndexerService creates an IndexerService.
// If ps is nil, this is a strict indexer (no pre-existing gossip sub from a node).
func NewIndexerService(h host.Host, ps *pubsub.PubSub, maxNode int) *IndexerService {
	logger := oclib.GetLogger()
	logger.Info().Msg("open indexer mode...")
	var err error
	ix := &IndexerService{
		LongLivedStreamRecordedService: common.NewStreamRecordedService[PeerRecord](h, maxNode, false),
		LongLivedStreamRecordedService: common.NewStreamRecordedService[PeerRecord](h, maxNode),
		isStrictIndexer:                ps == nil,
		referencedNodes:                map[pp.ID]PeerRecord{},
		pendingSearches:                map[string]chan []PeerRecord{},
		behavior:                       newNodeBehaviorTracker(),
		deletedDIDs:                    make(map[string]time.Time),
		eventQueue:                     &common.MembershipEventQueue{},
	}
	if ps == nil { // generate your fresh gossip for the flow of killed node... EVERYBODY should know !
	if ps == nil {
		ps, err = pubsub.NewGossipSub(context.Background(), ix.Host)
		if err != nil {
			panic(err) // can't run your indexer without a propalgation pubsub, of state of node.
			panic(err) // can't run your indexer without a propagation pubsub
		}
	}
	ix.PS = ps
	// later TODO: every indexer launches a private replica of itself. DEV OPS

	if ix.isStrictIndexer {
		logger.Info().Msg("connect to indexers as strict indexer...")
		common.ConnectToIndexers(h, 0, 5, ix.Host.ID()) // TODO : make var to change how many indexers are allowed.
		logger.Info().Msg("subscribe to node activity as strict indexer...")

		common.ConnectToIndexers(h, conf.GetConfig().MinIndexer, conf.GetConfig().MaxIndexer*2)
		logger.Info().Msg("subscribe to decentralized search flow as strict indexer...")
		ix.SubscribeToSearch(ix.PS, nil)
	}
	f := func(ctx context.Context, evt common.TopicNodeActivityPub, _ string) {
		ix.mu.Lock()
		if evt.NodeActivity == pp.OFFLINE {
			delete(ix.DisposedPeers, evt.Disposer.ID)
	go ix.SubscribeToSearch(ix.PS, nil)
	ix.AllowInbound = func(remotePeer pp.ID, isNew bool) error {
		/*if ix.behavior.IsBanned(remotePeer) {
			return errors.New("peer is banned")
		}*/
		if isNew {
			// DB blacklist check: blocks reconnection after EvictPeer + blacklist.
			/*if !ix.isPeerKnown(remotePeer) {
				return errors.New("peer is blacklisted or unknown")
			}*/
			if !ix.ConnGuard.Allow() {
				return errors.New("connection rate limit exceeded, retry later")
			}
		}
		return nil
	}
		if evt.NodeActivity == pp.ONLINE {
			ix.DisposedPeers[evt.Disposer.ID] = &evt
		}
		ix.mu.Unlock()
	}
	ix.SubscribeToNodeActivity(ix.PS, &f) // now we subscribe to a long run topic named node-activity, to relay message.
	ix.initNodeHandler() // then listen up on every protocol expected

	ix.LongLivedStreamRecordedService.AfterDelete = func(pid pp.ID, name, did string) {
		// Remove behavior state for peers that are no longer connected and
		// have no active ban — keeps memory bounded to the live node set.
		ix.behavior.Cleanup(pid)
	}

	// AllowInbound: fired once per stream open, before any heartbeat is decoded.
	// 1. Reject peers that are currently banned (behavioral strikes).
	// 2. For genuinely new connections, check the DB blacklist and apply the burst guard.

	// ValidateHeartbeat: fired on every heartbeat tick for an established stream.
	// Checks heartbeat cadence — rejects if the node is sending too fast.
	ix.ValidateHeartbeat = func(remotePeer pp.ID) error {
		return ix.behavior.RecordHeartbeat(remotePeer)
	}

	// Parse bootstrap peers from configured indexer addresses so the DHT can
	// find its routing table entries even in a fresh deployment.
	var bootstrapPeers []pp.AddrInfo
	for _, addrStr := range strings.Split(conf.GetConfig().IndexerAddresses, ",") {
		addrStr = strings.TrimSpace(addrStr)
		if addrStr == "" {
			continue
		}
		if ad, err := pp.AddrInfoFromString(addrStr); err == nil {
			bootstrapPeers = append(bootstrapPeers, *ad)
		}
	}
	dhtOpts := []dht.Option{
		dht.Mode(dht.ModeServer),
		dht.ProtocolPrefix("oc"),
		dht.Validator(record.NamespacedValidator{
			"node": PeerRecordValidator{},
			"name": DefaultValidator{},
			"pid":  DefaultValidator{},
		}),
	}
	if len(bootstrapPeers) > 0 {
		dhtOpts = append(dhtOpts, dht.BootstrapPeers(bootstrapPeers...))
	}
	if ix.DHT, err = dht.New(context.Background(), ix.Host, dhtOpts...); err != nil {
		logger.Info().Msg(err.Error())
		return nil
	}

	// Make the DHT available for replenishment from other packages.
	common.SetDiscoveryDHT(ix.DHT)

	ix.bornAt = time.Now().UTC()
	ix.offload.inBatch = make(map[pp.ID]time.Time)
	ix.offload.alreadyTried = make(map[pp.ID]struct{})
	ix.initNodeHandler()

	// Build and send a HeartbeatResponse after each received node heartbeat.
	// Raw metrics only — no pre-cooked score. The node computes the score itself.
	ix.BuildHeartbeatResponse = func(remotePeer pp.ID, hb *common.Heartbeat) *common.HeartbeatResponse {
		logger := oclib.GetLogger()
		need, challenges, challengeDID, referent, rawRecord :=
			hb.Need, hb.Challenges, hb.ChallengeDID, hb.Referent, hb.Record

		ix.StreamMU.RLock()
		peerCount := len(ix.StreamRecords[common.ProtocolHeartbeat])
		// Collect lastSeen per active peer for challenge responses.
		type peerMeta struct {
			found    bool
			lastSeen time.Time
		}
		peerLookup := make(map[string]peerMeta, peerCount)
		var remotePeerRecord PeerRecord
		for pid, rec := range ix.StreamRecords[common.ProtocolHeartbeat] {
			var ls time.Time
			if rec.HeartbeatStream != nil && rec.HeartbeatStream.UptimeTracker != nil {
				ls = rec.HeartbeatStream.UptimeTracker.LastSeen
			}
			peerLookup[pid.String()] = peerMeta{found: true, lastSeen: ls}
			if pid == remotePeer {
				remotePeerRecord = rec.Record
			}
		}
		ix.StreamMU.RUnlock()

		// AfterHeartbeat updates srec.Record asynchronously — it may not have run yet.
		// Use rawRecord (the fresh signed PeerRecord embedded in the heartbeat) directly
		// so referencedNodes always gets the current Name/DID regardless of timing.
		if remotePeerRecord.Name == "" && len(rawRecord) > 0 {
			var fresh PeerRecord
			if json.Unmarshal(rawRecord, &fresh) == nil {
				remotePeerRecord = fresh
			}
		}

		// Update referent designation: the node marks its best-scored indexer with Referent=true.
		ix.updateReferent(remotePeer, remotePeerRecord, referent)

		// SWIM refutation: if the node signals our current incarnation as suspected,
		// increment it and broadcast an alive event so other nodes can clear suspicion.
		inc := ix.incarnation.Load()
		if hb.SuspectedIncarnation != nil && *hb.SuspectedIncarnation == inc {
			inc = ix.incarnation.Add(1)
			logger.Info().
				Str("suspected_by", remotePeer.String()).
				Uint64("new_incarnation", inc).
				Msg("[swim] refuting suspicion — incarnation incremented")
			ix.eventQueue.Add(common.MemberEvent{
				Type:        common.MemberAlive,
				PeerID:      ix.Host.ID().String(),
				Incarnation: inc,
				HopsLeft:    common.InitialEventHops,
			})
		}

		// Relay incoming SWIM events from the node into our event queue so they
		// propagate to other connected nodes (infection-style forwarding).
		for _, ev := range hb.MembershipEvents {
			if ev.HopsLeft > 0 {
				ix.eventQueue.Add(ev)
			}
		}

		maxN := ix.MaxNodesConn()
		fillRate := 0.0
		if maxN > 0 {
			fillRate = float64(peerCount) / float64(maxN)
			if fillRate > 1 {
				fillRate = 1
			}
		}

		resp := &common.HeartbeatResponse{
			FillRate:  fillRate,
			PeerCount: peerCount,
			MaxNodes:  maxN,
			BornAt:    ix.bornAt,
		}

		// Answer each challenged PeerID with raw found + lastSeen.
		for _, pidStr := range challenges {
			meta := peerLookup[pidStr] // zero value if not found
			entry := common.ChallengeEntry{
				PeerID:   pidStr,
				Found:    meta.found,
				LastSeen: meta.lastSeen,
			}
			resp.Challenges = append(resp.Challenges, entry)
		}

		// DHT challenge: retrieve the node's own DID to prove the DHT is functional.
		if challengeDID != "" {
			ctx3, cancel3 := context.WithTimeout(context.Background(), 3*time.Second)
			val, err := ix.DHT.GetValue(ctx3, "/node/"+challengeDID)
			cancel3()
			resp.DHTFound = err == nil
			if err == nil {
				resp.DHTPayload = json.RawMessage(val)
			}
		}

		// Random sample of connected nodes as witnesses (up to 3).
		// Never include the requesting peer itself — asking a node to witness
		// itself is circular and meaningless.
		ix.StreamMU.RLock()
		for pidStr := range peerLookup {
			if len(resp.Witnesses) >= 3 {
				break
			}
			pid, err := pp.Decode(pidStr)
			if err != nil || pid == remotePeer || pid == ix.Host.ID() {
				continue
			}
			addrs := ix.Host.Peerstore().Addrs(pid)
			ai := common.FilterLoopbackAddrs(pp.AddrInfo{ID: pid, Addrs: addrs})
			if len(ai.Addrs) > 0 {
				resp.Witnesses = append(resp.Witnesses, ai)
			}
		}
		ix.StreamMU.RUnlock()

		// Attach suggestions: exactly `need` entries from the DHT cache.
		// If the indexer is overloaded (SuggestMigrate will be set below), always
		// provide at least 1 suggestion even when need == 0, so the node has
		// somewhere to go.
		suggestionsNeeded := need
		if fillRate > offloadThreshold && suggestionsNeeded < 1 {
			suggestionsNeeded = 1
		}
		if suggestionsNeeded > 0 {
			ix.dhtCacheMu.RLock()
			// When offloading, pick from a random offset within the top N of the
			// cache so concurrent migrations spread across multiple targets rather
			// than all rushing to the same least-loaded indexer (thundering herd).
			// For normal need-based suggestions the full sorted order is fine.
			cache := ix.dhtCache
			if fillRate > offloadThreshold && len(cache) > suggestionsNeeded {
				const spreadWindow = 5 // sample from the top-5 least-loaded
				window := spreadWindow
				if window > len(cache) {
					window = len(cache)
				}
				start := rand.Intn(window)
				cache = cache[start:]
			}
			for _, e := range cache {
				if len(resp.Suggestions) >= suggestionsNeeded {
					break
				}
				// Never suggest the requesting peer itself or this indexer.
				if e.AI.ID == remotePeer || e.AI.ID == h.ID() {
					continue
				}
				resp.Suggestions = append(resp.Suggestions, e.AI)
			}
			ix.dhtCacheMu.RUnlock()
		}

		// Offload logic: when the fill rate is too high, selectively ask nodes to migrate.
		if fillRate > offloadThreshold && len(resp.Suggestions) > 0 {
			now := time.Now()
			ix.offload.mu.Lock()
			// Expire stale batch entries -> move to alreadyTried.
			for pid, addedAt := range ix.offload.inBatch {
				if now.Sub(addedAt) > offloadGracePeriod {
					ix.offload.alreadyTried[pid] = struct{}{}
					delete(ix.offload.inBatch, pid)
				}
			}
			// Reset alreadyTried if we've exhausted the whole pool.
			if len(ix.offload.alreadyTried) >= peerCount {
				ix.offload.alreadyTried = make(map[pp.ID]struct{})
			}
			_, tried := ix.offload.alreadyTried[remotePeer]
			_, inBatch := ix.offload.inBatch[remotePeer]
			if !tried {
				if inBatch {
					resp.SuggestMigrate = true
				} else if len(ix.offload.inBatch) < offloadBatchSize {
					ix.offload.inBatch[remotePeer] = now
					resp.SuggestMigrate = true
				}
			}
			ix.offload.mu.Unlock()
		} else if fillRate <= offloadThreshold {
			// Fill rate back to normal: reset offload state.
			ix.offload.mu.Lock()
			if len(ix.offload.inBatch) > 0 || len(ix.offload.alreadyTried) > 0 {
				ix.offload.inBatch = make(map[pp.ID]time.Time)
				ix.offload.alreadyTried = make(map[pp.ID]struct{})
			}
			ix.offload.mu.Unlock()
		}

		// Bootstrap: if this indexer has no indexers of its own, probe the
		// connecting peer to check it supports ProtocolHeartbeat (i.e. it is
		// itself an indexer). Plain nodes do not register the handler and the
		// negotiation fails instantly — no wasted heartbeat cycle.
		// Run in a goroutine: the probe is a short blocking stream open.
		if len(common.Indexers.GetAddrs()) == 0 && remotePeer != h.ID() {
			pid := remotePeer
			go func() {
				if !common.SupportsHeartbeat(h, pid) {
					logger.Debug().Str("peer", pid.String()).
						Msg("[bootstrap] inbound peer has no heartbeat handler — not an indexer, skipping")
					return
				}
				addrs := h.Peerstore().Addrs(pid)
				ai := common.FilterLoopbackAddrs(pp.AddrInfo{ID: pid, Addrs: addrs})
				if len(ai.Addrs) == 0 {
					return
				}
				key := pid.String()
				if !common.Indexers.ExistsAddr(key) {
					adCopy := ai
					common.Indexers.SetAddr(key, &adCopy)
					common.Indexers.NudgeIt()
					logger.Info().Str("peer", key).Msg("[bootstrap] no indexers — added inbound indexer peer as candidate")
				}
			}()
		}

		// Attach SWIM incarnation and piggybacked membership events.
		resp.Incarnation = ix.incarnation.Load()
		resp.MembershipEvents = ix.eventQueue.Drain(5)

		return resp
	}

	// Advertise this indexer in the DHT so nodes can discover it.
	fillRateFn := func() float64 {
		ix.StreamMU.RLock()
		n := len(ix.StreamRecords[common.ProtocolHeartbeat])
		ix.StreamMU.RUnlock()
		maxN := ix.MaxNodesConn()
		if maxN <= 0 {
			return 0
		}
		rate := float64(n) / float64(maxN)
		if rate > 1 {
			rate = 1
		}
		return rate
	}
	ix.startDHTCacheRefresh()
	ix.startDHTProvide(fillRateFn)

	return ix
}
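
The refutation rule used inside BuildHeartbeatResponse is small enough to isolate. A hypothetical sketch of just that rule, detached from IndexerService: a member only bumps its incarnation when the suspicion targets the current value, so stale suspicions are ignored.

type member struct{ incarnation atomic.Uint64 }

// refute returns the incarnation to advertise and whether an alive event
// must be broadcast. Only a suspicion of the current incarnation refutes.
func (m *member) refute(suspected uint64) (uint64, bool) {
	cur := m.incarnation.Load()
	if suspected != cur {
		return cur, false // stale suspicion: nothing to do
	}
	return m.incarnation.Add(1), true // bump and announce we are alive
}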

// startDHTCacheRefresh periodically queries the DHT for peer indexers and
// refreshes ix.dhtCache. This passive cache is used by BuildHeartbeatResponse
// to suggest better indexers to connected nodes without any per-request cost.
func (ix *IndexerService) startDHTCacheRefresh() {
	ctx, cancel := context.WithCancel(context.Background())
	// Store cancel alongside the provide cancel so Close() stops both.
	prevCancel := ix.dhtProvideCancel
	ix.dhtProvideCancel = func() {
		if prevCancel != nil {
			prevCancel()
		}
		cancel()
	}
	go func() {
		logger := oclib.GetLogger()
		refresh := func() {
			if ix.DHT == nil {
				return
			}
			// Fetch more than needed so SelectByFillRate can filter for diversity.
			raw := common.DiscoverIndexersFromDHT(ix.Host, ix.DHT, 30)
			if len(raw) == 0 {
				return
			}
			// Remove self before selection.
			filtered := raw[:0]
			for _, ai := range raw {
				if ai.ID != ix.Host.ID() {
					filtered = append(filtered, ai)
				}
			}
			// SelectByFillRate applies /24 subnet diversity and fill-rate weighting.
			// Fill rates are unknown at this stage (nil map) so all peers get
			// the neutral prior f=0.5 — diversity filtering still applies.
			selected := common.SelectByFillRate(filtered, nil, 10)
			now := time.Now()
			ix.dhtCacheMu.Lock()
			ix.dhtCache = ix.dhtCache[:0]
			for _, ai := range selected {
				ix.dhtCache = append(ix.dhtCache, dhtCacheEntry{AI: ai, LastSeen: now})
			}
			ix.dhtCacheMu.Unlock()
			logger.Info().Int("cached", len(selected)).Msg("[dht] indexer suggestion cache refreshed")
		}
		// Initial delay: let the DHT routing table warm up first.
		select {
		case <-time.After(30 * time.Second):
		case <-ctx.Done():
			return
		}
		refresh()
		t := time.NewTicker(2 * time.Minute)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				refresh()
			case <-ctx.Done():
				return
			}
		}
	}()
}

// startDHTProvide bootstraps the DHT and starts a goroutine that periodically
// advertises this indexer under the well-known provider key.
func (ix *IndexerService) startDHTProvide(fillRateFn func() float64) {
	ctx, cancel := context.WithCancel(context.Background())
	ix.dhtProvideCancel = cancel
	go func() {
		logger := oclib.GetLogger()
		// Wait until a routable (non-loopback) address is available.
		for i := 0; i < 12; i++ {
			addrs := ix.Host.Addrs()
			if len(addrs) > 0 && !strings.Contains(addrs[len(addrs)-1].String(), "127.0.0.1") {
				break
			}
			select {
			case <-ctx.Done():
				return
			case <-time.After(5 * time.Second):
			}
		}
		if err := ix.DHT.Bootstrap(ctx); err != nil {
			logger.Warn().Err(err).Msg("[dht] bootstrap failed")
		}
		provide := func() {
			pCtx, pCancel := context.WithTimeout(ctx, 30*time.Second)
			defer pCancel()
			if err := ix.DHT.Provide(pCtx, common.IndexerCID(), true); err != nil {
				logger.Warn().Err(err).Msg("[dht] Provide failed")
			} else {
				logger.Info().Float64("fill_rate", fillRateFn()).Msg("[dht] indexer advertised in DHT")
			}
		}
		provide()
		t := time.NewTicker(common.RecommendedHeartbeatInterval)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				provide()
			case <-ctx.Done():
				return
			}
		}
	}()
}

// EvictPeer immediately closes the heartbeat stream of a peer and removes it
// from the active stream records. Used when a peer is auto-blacklisted.
func (ix *IndexerService) EvictPeer(peerID string) {
	pid, err := pp.Decode(peerID)
	if err != nil {
		return
	}
	ix.StreamMU.Lock()
	defer ix.StreamMU.Unlock()
	if rec, ok := ix.StreamRecords[common.ProtocolHeartbeat][pid]; ok {
		if rec.HeartbeatStream != nil && rec.HeartbeatStream.Stream != nil {
			rec.HeartbeatStream.Stream.Reset()
		}
		delete(ix.StreamRecords[common.ProtocolHeartbeat], pid)
	}
}

func (ix *IndexerService) Close() {
	if ix.dhtProvideCancel != nil {
		ix.dhtProvideCancel()
	}
	ix.DHT.Close()
	ix.PS.UnregisterTopicValidator(common.TopicPubSubSearch)
	for _, s := range ix.StreamRecords {
		for _, ss := range s {
			ss.Stream.Close()
			ss.HeartbeatStream.Stream.Close()
		}
	}
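
Provide and FindProvidersAsync are the two halves of the advertisement loop above. A sketch of the consumer half; a hypothetical helper, assuming the same well-known CID that common.IndexerCID() returns, the dht/pp aliases used in this file, and the github.com/ipfs/go-cid import:

func discoverIndexers(ctx context.Context, d *dht.IpfsDHT, wellKnown cid.Cid, max int) []pp.AddrInfo {
	out := []pp.AddrInfo{}
	// FindProvidersAsync streams AddrInfos as the DHT walk discovers them,
	// up to max entries or until ctx is cancelled.
	for ai := range d.FindProvidersAsync(ctx, wellKnown, max) {
		if len(ai.Addrs) > 0 { // skip providers with no dialable address
			out = append(out, ai)
		}
	}
	return out
}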

93 daemons/node/indexer/validator.go Normal file
@@ -0,0 +1,93 @@
package indexer

import (
	"encoding/json"
	"errors"
	"time"
)

type DefaultValidator struct{}

func (v DefaultValidator) Validate(key string, value []byte) error {
	return nil
}

func (v DefaultValidator) Select(key string, values [][]byte) (int, error) {
	return 0, nil
}

type PeerRecordValidator struct{}

func (v PeerRecordValidator) Validate(key string, value []byte) error {
	// Accept valid tombstones — deletion must be storable so it can propagate
	// and win over stale live records on other DHT nodes via Select().
	var ts TombstoneRecord
	if err := json.Unmarshal(value, &ts); err == nil && ts.Tombstone {
		if ts.PeerID == "" || ts.DID == "" {
			return errors.New("tombstone: missing fields")
		}
		if time.Since(ts.DeletedAt) > tombstoneTTL {
			return errors.New("tombstone: expired")
		}
		if _, err := ts.Verify(); err != nil {
			return errors.New("tombstone: " + err.Error())
		}
		return nil
	}

	var rec PeerRecord
	if err := json.Unmarshal(value, &rec); err != nil {
		return errors.New("invalid json")
	}

	// PeerID must exist
	if rec.PeerID == "" {
		return errors.New("missing peerID")
	}

	// Expiry check
	if rec.ExpiryDate.Before(time.Now().UTC()) {
		return errors.New("record expired")
	}

	// TTL cap: publisher cannot set an expiry further than maxTTLSeconds in
	// the future. Prevents abuse (e.g. records designed to linger for years).
	if rec.ExpiryDate.After(time.Now().UTC().Add(maxTTLSeconds * time.Second)) {
		return errors.New("TTL exceeds maximum allowed")
	}

	// Signature verification
	if _, err := rec.Verify(); err != nil {
		return errors.New("invalid signature")
	}

	return nil
}

func (v PeerRecordValidator) Select(key string, values [][]byte) (int, error) {
	// Tombstone always wins: a signed delete supersedes any live record,
	// even if the live record has a later ExpiryDate.
	for i, val := range values {
		var ts TombstoneRecord
		if err := json.Unmarshal(val, &ts); err == nil && ts.Tombstone {
			return i, nil
		}
	}

	var newest time.Time
	index := 0

	for i, val := range values {
		var rec PeerRecord
		if err := json.Unmarshal(val, &rec); err != nil {
			continue
		}

		if rec.ExpiryDate.After(newest) {
			newest = rec.ExpiryDate
			index = i
		}
	}

	return index, nil
}
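
The ordering guarantee Select provides (a signed delete beats any live record regardless of expiry) can be seen in isolation. A self-contained sketch with hypothetical JSON values; probe is a stand-in, not the real TombstoneRecord:

package main

import (
	"encoding/json"
	"fmt"
)

func pickIndex(values [][]byte) int {
	type probe struct {
		Tombstone bool `json:"tombstone"`
	}
	for i, v := range values {
		var p probe
		if json.Unmarshal(v, &p) == nil && p.Tombstone {
			return i // a tombstone always wins
		}
	}
	return 0 // fallback; the real code picks the latest ExpiryDate here
}

func main() {
	live := []byte(`{"peer_id":"12D3...","expiry_date":"2999-01-01T00:00:00Z"}`)
	dead := []byte(`{"tombstone":true,"peer_id":"12D3...","did":"did:oc:abc"}`)
	fmt.Println(pickIndex([][]byte{live, dead})) // 1: the tombstone, despite the far-future expiry
}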

99 daemons/node/location/location.go Normal file
@@ -0,0 +1,99 @@
// Package location resolves the geographic position of this node via IP
// geolocation and applies a privacy-preserving random offset proportional
// to the chosen granularity level before publishing the result.
package location

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"net/http"
	"time"

	peer "cloud.o-forge.io/core/oc-lib/models/peer"
)

// fuzzRadius returns the maximum random offset (in degrees) for each axis
// given a granularity level.
//
//	0 → no location
//	1 → continent ±15° lat / ±20° lng
//	2 → country   ±3° lat / ±4° lng (default)
//	3 → region    ±0.5° lat / ±0.7° lng
//	4 → city      ±0.05° lat / ±0.07° lng
func fuzzRadius(granularity int) (latR, lngR float64) {
	switch granularity {
	case 1:
		return 15.0, 20.0
	case 2:
		return 3.0, 4.0
	case 3:
		return 0.5, 0.7
	case 4:
		return 0.05, 0.07
	default:
		return 3.0, 4.0
	}
}

// clamp keeps a value inside [min, max].
func clamp(v, min, max float64) float64 {
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}

// ipAPIResponse is the subset of fields returned by ip-api.com/json.
type ipAPIResponse struct {
	Status  string  `json:"status"`
	Lat     float64 `json:"lat"`
	Lon     float64 `json:"lon"`
	Country string  `json:"country"`
	Region  string  `json:"regionName"`
	City    string  `json:"city"`
}

// Geolocate resolves the current public IP location via ip-api.com (free,
// no key required for non-commercial use), then fuzzes the result according
// to granularity.
//
// Returns nil if granularity == 0 (opt-out) or if the lookup fails.
func Geolocate(granularity int) *peer.PeerLocation {
	if granularity == 0 {
		return nil
	}

	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://ip-api.com/json?fields=status,lat,lon,country,regionName,city")
	if err != nil {
		return nil
	}
	defer resp.Body.Close()

	var result ipAPIResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil || result.Status != "success" {
		return nil
	}

	latR, lngR := fuzzRadius(granularity)
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))

	fuzzedLat := result.Lat + (rng.Float64()*2-1)*latR
	fuzzedLng := result.Lon + (rng.Float64()*2-1)*lngR

	fuzzedLat = clamp(fuzzedLat, -85.0, 85.0)
	fuzzedLng = clamp(fuzzedLng, -180.0, 180.0)

	fmt.Printf("[location] granularity=%d raw=(%.4f,%.4f) fuzzed=(%.4f,%.4f)\n",
		granularity, result.Lat, result.Lon, fuzzedLat, fuzzedLng)

	return &peer.PeerLocation{
		Latitude:    fuzzedLat,
		Longitude:   fuzzedLng,
		Granularity: granularity,
	}
}
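
To make the fuzzing math concrete: at granularity 2 the offset per axis is uniform in ±3° latitude / ±4° longitude, roughly country scale. A standalone sketch with a fixed seed and illustrative coordinates:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	latR, lngR := 3.0, 4.0 // fuzzRadius(2)
	rng := rand.New(rand.NewSource(42))
	lat, lon := 45.7640, 4.8357 // raw geolocation (Lyon, illustrative)
	// rng.Float64()*2-1 is uniform in [-1, 1); scaling by the radius gives
	// an offset uniform in [-latR, +latR) and [-lngR, +lngR).
	fuzzedLat := lat + (rng.Float64()*2-1)*latR
	fuzzedLng := lon + (rng.Float64()*2-1)*lngR
	fmt.Printf("raw=(%.4f,%.4f) fuzzed=(%.4f,%.4f)\n", lat, lon, fuzzedLat, fuzzedLng)
}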

@@ -4,15 +4,46 @@ import (
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"oc-discovery/daemons/node/indexer"
|
||||
"oc-discovery/daemons/node/stream"
|
||||
"slices"
|
||||
|
||||
oclib "cloud.o-forge.io/core/oc-lib"
|
||||
"cloud.o-forge.io/core/oc-lib/config"
|
||||
pp_model "cloud.o-forge.io/core/oc-lib/models/peer"
|
||||
"cloud.o-forge.io/core/oc-lib/tools"
|
||||
pp "github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/libp2p/go-libp2p/core/protocol"
|
||||
)
|
||||
|
||||
func ListenNATS(n Node) {
|
||||
type configPayload struct {
|
||||
PeerID string `json:"source_peer_id"`
|
||||
}
|
||||
|
||||
type executionConsidersPayload struct {
|
||||
PeerIDs []string `json:"peer_ids"`
|
||||
}
|
||||
|
||||
func ListenNATS(n *Node) {
|
||||
tools.NewNATSCaller().ListenNats(map[tools.NATSMethod]func(tools.NATSResponse){
|
||||
tools.PEER_BEHAVIOR_EVENT: func(resp tools.NATSResponse) { //nolint:typecheck
|
||||
handlePeerBehaviorEvent(n, resp)
|
||||
},
|
||||
// PEER_OBSERVE_EVENT is sent by oc-peer to start or stop observations
|
||||
// for a list of peer IDs, or to trigger a close-all.
|
||||
tools.PEER_OBSERVE_EVENT: func(resp tools.NATSResponse) {
|
||||
n.StreamService.HandleObserveNATSCommand(resp)
|
||||
},
|
||||
tools.PROPALGATION_EVENT: func(resp tools.NATSResponse) {
|
||||
if resp.FromApp == config.GetAppName() {
|
||||
return
|
||||
}
|
||||
p, err := oclib.GetMySelf()
|
||||
if err != nil || p == nil || p.PeerID != n.PeerID.String() {
|
||||
return
|
||||
}
|
||||
var propalgation tools.PropalgationMessage
|
||||
err := json.Unmarshal(resp.Payload, &propalgation)
|
||||
err = json.Unmarshal(resp.Payload, &propalgation)
|
||||
var dt *tools.DataType
|
||||
if propalgation.DataType > 0 {
|
||||
dtt := tools.DataType(propalgation.DataType)
|
||||
@@ -20,27 +51,248 @@ func ListenNATS(n Node) {
|
||||
}
|
||||
if err == nil {
|
||||
switch propalgation.Action {
|
||||
case tools.PB_CREATE:
|
||||
case tools.PB_UPDATE:
|
||||
case tools.PB_DELETE:
|
||||
n.StreamService.ToPartnerPublishEvent(
|
||||
context.Background(),
|
||||
propalgation.Action,
|
||||
dt, resp.User,
|
||||
propalgation.Payload,
|
||||
)
|
||||
case tools.PB_SEARCH:
|
||||
case tools.PB_ADMIRALTY_CONFIG, tools.PB_MINIO_CONFIG:
|
||||
var m configPayload
|
||||
var proto protocol.ID = stream.ProtocolAdmiraltyConfigResource
|
||||
if propalgation.Action == tools.PB_MINIO_CONFIG {
|
||||
proto = stream.ProtocolMinioConfigResource
|
||||
}
|
||||
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
|
||||
peers, _ := n.GetPeerRecord(context.Background(), m.PeerID)
|
||||
for _, p := range peers {
|
||||
n.StreamService.PublishCommon(&resp.Datatype, resp.User, resp.Groups,
|
||||
p.PeerID, proto, propalgation.Payload)
|
||||
}
|
||||
}
|
||||
case tools.PB_CREATE, tools.PB_UPDATE, tools.PB_DELETE:
|
||||
if slices.Contains([]tools.DataType{tools.BOOKING, tools.PURCHASE_RESOURCE}, resp.Datatype) {
|
||||
m := map[string]interface{}{}
|
||||
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
|
||||
if m["peer_id"] != nil {
|
||||
n.StreamService.PublishCommon(&resp.Datatype, resp.User, resp.Groups,
|
||||
fmt.Sprintf("%v", m["peer_id"]), stream.ProtocolCreateResource, propalgation.Payload)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fmt.Println(n.StreamService.ToPartnerPublishEvent(
|
||||
context.Background(),
|
||||
propalgation.Action,
|
||||
dt, resp.User, resp.Groups,
|
||||
propalgation.Payload,
|
||||
))
|
||||
}
|
||||
case tools.PB_CONSIDERS:
|
||||
switch resp.Datatype {
|
||||
case tools.BOOKING, tools.PURCHASE_RESOURCE, tools.WORKFLOW_EXECUTION:
|
||||
var m executionConsidersPayload
|
||||
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
|
||||
for _, p := range m.PeerIDs {
|
||||
peers, _ := n.GetPeerRecord(context.Background(), p)
|
||||
for _, pp := range peers {
|
||||
n.StreamService.PublishCommon(&resp.Datatype, resp.User, resp.Groups,
|
||||
pp.PeerID, stream.ProtocolConsidersResource, propalgation.Payload)
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
// minio / admiralty config considers — route back to OriginID.
|
||||
var m struct {
|
||||
OriginID string `json:"origin_id"`
|
||||
}
|
||||
if err := json.Unmarshal(propalgation.Payload, &m); err == nil && m.OriginID != "" {
|
||||
peers, _ := n.GetPeerRecord(context.Background(), m.OriginID)
|
||||
for _, p := range peers {
|
||||
n.StreamService.PublishCommon(nil, resp.User, resp.Groups,
|
||||
p.PeerID, stream.ProtocolConsidersResource, propalgation.Payload)
|
||||
}
|
||||
}
|
||||
}
|
||||
case tools.PB_PLANNER:
|
||||
m := map[string]interface{}{}
|
||||
json.Unmarshal(propalgation.Payload, &m)
|
||||
n.PubSubService.SearchPublishEvent(
|
||||
context.Background(),
|
||||
dt,
|
||||
fmt.Sprintf("%v", m["type"]),
|
||||
resp.User,
|
||||
fmt.Sprintf("%v", m["search"]),
|
||||
)
|
||||
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
|
||||
b := []byte{}
|
||||
if len(m) > 1 {
|
||||
b = propalgation.Payload
|
||||
}
|
||||
if m["peer_id"] == nil { // send to every active stream
|
||||
n.StreamService.Mu.Lock()
|
||||
if n.StreamService.Streams[stream.ProtocolSendPlanner] != nil {
|
||||
for pid := range n.StreamService.Streams[stream.ProtocolSendPlanner] { // send Planner can be long lived - it's a conn
|
||||
n.StreamService.PublishCommon(nil, resp.User, resp.Groups, pid.String(), stream.ProtocolSendPlanner, b)
|
||||
}
|
||||
}
|
||||
n.StreamService.Mu.Unlock()
|
||||
} else {
|
||||
fmt.Println("REACH PLANNER")
|
||||
n.StreamService.PublishCommon(nil, resp.User, resp.Groups, fmt.Sprintf("%v", m["peer_id"]), stream.ProtocolSendPlanner, b)
|
||||
}
|
||||
}
|
||||
case tools.PB_CLOSE_PLANNER:
|
||||
m := map[string]interface{}{}
|
||||
if err := json.Unmarshal(resp.Payload, &m); err == nil {
|
||||
n.StreamService.Mu.Lock()
|
||||
if pid, err := pp.Decode(fmt.Sprintf("%v", m["peer_id"])); err == nil {
|
||||
if n.StreamService.Streams[stream.ProtocolSendPlanner] != nil && n.StreamService.Streams[stream.ProtocolSendPlanner][pid] != nil {
|
||||
n.StreamService.Streams[stream.ProtocolSendPlanner][pid].Stream.Close()
|
||||
delete(n.StreamService.Streams[stream.ProtocolSendPlanner], pid)
|
||||
}
|
||||
}
|
||||
n.StreamService.Mu.Unlock()
|
||||
}
|
||||
case tools.PB_OBSERVE:
|
||||
print("PROPALGATE OBSERVE")
|
||||
handleObserveEvent(n, propalgation)
|
||||
case tools.PB_OBSERVE_CLOSE:
|
||||
print("PROPALGATE CLOSE")
|
||||
handleObserveCloseEvent(n, propalgation)
|
||||
case tools.PB_PROPAGATE:
|
||||
// Another oc-discovery forwarded a heartbeat batch.
|
||||
// Re-emit on PEER_OBSERVE_RESPONSE_EVENT so the local oc-peer sees it.
|
||||
tools.NewNATSCaller().SetNATSPub(tools.PEER_OBSERVE_RESPONSE_EVENT, tools.NATSResponse{
|
||||
FromApp: resp.FromApp,
|
||||
Datatype: tools.PEER,
|
||||
Method: int(tools.PEER_OBSERVE_RESPONSE_EVENT),
|
||||
Payload: propalgation.Payload,
|
||||
})
|
||||
case tools.PB_CLOSE_SEARCH:
|
||||
if propalgation.DataType == int(tools.PEER) {
|
||||
n.peerSearches.Cancel(resp.User)
|
||||
} else {
|
||||
n.StreamService.ResourceSearches.Cancel(resp.User)
|
||||
}
|
||||
case tools.PB_SEARCH:
|
||||
fmt.Println("PROPALGATE PEER")
|
||||
if propalgation.DataType == int(tools.PEER) {
|
||||
m := map[string]interface{}{}
|
||||
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
|
||||
needle := fmt.Sprintf("%v", m["search"])
|
||||
userKey := resp.User
|
||||
go n.SearchPeerRecord(userKey, needle, func(hit indexer.PeerRecord) {
|
||||
if b, err := json.Marshal(hit); err == nil {
|
||||
tools.NewNATSCaller().SetNATSPub(tools.SEARCH_EVENT, tools.NATSResponse{
|
||||
FromApp: "oc-discovery",
|
||||
Datatype: tools.DataType(tools.PEER),
|
||||
User: userKey,
|
||||
Method: int(tools.SEARCH_EVENT),
|
||||
Payload: b,
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
} else {
|
||||
m := map[string]interface{}{}
|
||||
if err := json.Unmarshal(propalgation.Payload, &m); err == nil {
|
||||
fmt.Println("PB_SEARCH CATA", m)
|
||||
|
||||
n.PubSubService.SearchPublishEvent(
|
||||
context.Background(),
|
||||
dt,
|
||||
fmt.Sprintf("%v", m["type"]),
|
||||
resp.User, resp.Groups,
|
||||
fmt.Sprintf("%v", m["search"]),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
}

// handlePeerBehaviorEvent applies a PeerBehaviorReport received from a trusted
// service (oc-scheduler, oc-datacenter, …). It:
//  1. Loads the target peer from the local DB.
//  2. Deducts the trust penalty and appends a BehaviorWarning.
//  3. Auto-blacklists and evicts the peer stream when TrustScore ≤ threshold.
//
// oc-discovery does NOT re-emit a PROPALGATION_EVENT: propagation is strictly
// inbound (oc-catalog → oc-discovery). The blacklist takes effect locally at
// the next isPeerKnown() call, and immediately via EvictPeer().
func handlePeerBehaviorEvent(n *Node, resp tools.NATSResponse) {
	var report tools.PeerBehaviorReport
	if err := json.Unmarshal(resp.Payload, &report); err != nil {
		fmt.Println("handlePeerBehaviorEvent: unmarshal error:", err)
		return
	}
	if report.TargetPeerID == "" {
		return
	}

	access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
	data := access.LoadOne(report.TargetPeerID)
	if data.Data == nil {
		fmt.Println("handlePeerBehaviorEvent: peer not found:", report.TargetPeerID)
		return
	}
	p := data.ToPeer()
	if p == nil {
		return
	}
	// Self-protection: never penalise ourselves.
	if self, err := oclib.GetMySelf(); err == nil && self != nil && self.GetID() == p.GetID() {
		return
	}

	shouldBlacklist := p.ApplyBehaviorReport(report)
	if shouldBlacklist && p.Relation != pp_model.BLACKLIST {
		p.Relation = pp_model.BLACKLIST
		fmt.Printf("handlePeerBehaviorEvent: auto-blacklisting peer %s — reason: %s\n",
			p.PeerID, p.BlacklistReason)
		// Immediately evict any active stream so the peer can no longer heartbeat.
		if n.IndexerService != nil {
			n.IndexerService.EvictPeer(p.PeerID)
		}
	}

	// Persist updated trust score + relation locally.
	if updated := access.UpdateOne(p.Serialize(p), p.GetID()); updated.Err != "" {
		fmt.Println("handlePeerBehaviorEvent: could not update peer:", updated.Err)
		return
	}

	// Notify oc-peer (and any other local NATS consumer) of the updated peer record
	// via CREATE_RESOURCE so they can synchronise their own state.
	if b, err := json.Marshal(p.Serialize(p)); err == nil {
		tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
			FromApp:  "oc-discovery",
			Datatype: tools.PEER,
			Method:   int(tools.CREATE_RESOURCE),
			Payload:  b,
		})
	}
}

// handleObserveEvent processes a PB_OBSERVE PropalgationMessage from another
// oc-discovery node, starting observation for the listed peers.
func handleObserveEvent(n *Node, p tools.PropalgationMessage) {
	var cmd stream.ObserveCommand
	if err := json.Unmarshal(p.Payload, &cmd); err != nil {
		fmt.Println("handleObserveEvent: unmarshal error:", err)
		return
	}
	for _, sp := range cmd.Peers {
		if err := n.StreamService.OpenObserveStream(sp); err != nil {
			fmt.Println("handleObserveEvent: OpenObserveStream failed for", sp.PeerID, ":", err)
		}
	}
}

// handleObserveCloseEvent processes a PB_OBSERVE_CLOSE PropalgationMessage from
// another oc-discovery node, stopping observation for the listed peer IDs.
func handleObserveCloseEvent(n *Node, p tools.PropalgationMessage) {
	var cmd stream.ObserveCommand
	if err := json.Unmarshal(p.Payload, &cmd); err != nil {
		fmt.Println("handleObserveCloseEvent: unmarshal error:", err)
		return
	}
	if cmd.CloseAll {
		n.StreamService.CloseAllObserves()
		return
	}
	for _, peerID := range cmd.PeerIDs {
		if err := n.StreamService.CloseObserveStream(peerID); err != nil {
			fmt.Println("handleObserveCloseEvent: CloseObserveStream failed for", peerID, ":", err)
		}
	}
}
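
For the emitting side of PEER_BEHAVIOR_EVENT, the contract implied above is: marshal a tools.PeerBehaviorReport and publish it with SetNATSPub. A hypothetical sketch of what a trusted service might send; only TargetPeerID is confirmed by the code above, and any further report fields would be assumptions:

func reportPeer(targetPeerID string) error {
	// Illustrative only: a real report presumably also carries a penalty
	// and a reason, but those fields are not shown in this diff.
	report := tools.PeerBehaviorReport{TargetPeerID: targetPeerID}
	b, err := json.Marshal(report)
	if err != nil {
		return err
	}
	tools.NewNATSCaller().SetNATSPub(tools.PEER_BEHAVIOR_EVENT, tools.NATSResponse{
		FromApp:  "oc-scheduler", // illustrative sender
		Datatype: tools.PEER,
		Method:   int(tools.PEER_BEHAVIOR_EVENT),
		Payload:  b,
	})
	return nil
}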

@@ -2,24 +2,30 @@ package node
import (
	"context"
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"oc-discovery/conf"
	"oc-discovery/daemons/node/common"
	"oc-discovery/daemons/node/indexer"
	"oc-discovery/daemons/node/location"
	"oc-discovery/daemons/node/pubsub"
	"oc-discovery/daemons/node/stream"
	"sync"
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/google/uuid"
	"github.com/libp2p/go-libp2p"
	pubsubs "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/network"
	pp "github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
	"github.com/libp2p/go-libp2p/p2p/security/noise"
)

type Node struct {
@@ -30,6 +36,12 @@ type Node struct {
	StreamService *stream.StreamService
	PeerID        pp.ID
	isIndexer     bool
	peerRecord    *indexer.PeerRecord

	// peerSearches: one active peer search per user; new search cancels previous.
	peerSearches *common.SearchTracker

	Mu sync.RWMutex
}

func InitNode(isNode bool, isIndexer bool) (*Node, error) {
@@ -38,7 +50,7 @@ func InitNode(isNode bool, isIndexer bool) (*Node, error) {
	}
	logger := oclib.GetLogger()
	logger.Info().Msg("retrieving private key...")
	priv, err := common.LoadKeyFromFilePrivate() // your node private key
	priv, err := tools.LoadKeyFromFilePrivate() // your node private key
	if err != nil {
		return nil, err
	}
@@ -48,13 +60,17 @@ func InitNode(isNode bool, isIndexer bool) (*Node, error) {
		return nil, nil
	}
	logger.Info().Msg("open a host...")
	gater := newOCConnectionGater(nil) // host set below after creation
	h, err := libp2p.New(
		libp2p.PrivateNetwork(psk),
		libp2p.Identity(priv),
		libp2p.Security(noise.ID, noise.New),
		libp2p.ListenAddrStrings(
			fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", conf.GetConfig().NodeEndpointPort),
		),
		libp2p.ConnectionGater(gater),
	)
	gater.host = h // wire host back into gater now that it exists
	logger.Info().Msg("Host open on " + h.ID().String())
	if err != nil {
		return nil, errors.New("no host no node")
@@ -62,8 +78,16 @@ func InitNode(isNode bool, isIndexer bool) (*Node, error) {
	node := &Node{
		PeerID:    h.ID(),
		isIndexer: isIndexer,
		LongLivedStreamRecordedService: common.NewStreamRecordedService[interface{}](h, 1000, false),
		LongLivedStreamRecordedService: common.NewStreamRecordedService[interface{}](h, 1000),
		peerSearches: common.NewSearchTracker(),
	}
	// Register the bandwidth probe handler so any peer measuring this node's
	// throughput can open a dedicated probe stream and read the echo.
	h.SetStreamHandler(common.ProtocolBandwidthProbe, common.HandleBandwidthProbe)
	// Register the witness query handler so peers can ask this node's view of indexers.
	h.SetStreamHandler(common.ProtocolWitnessQuery, func(s network.Stream) {
		common.HandleWitnessQuery(h, s)
	})
	var ps *pubsubs.PubSub
	if isNode {
		logger.Info().Msg("generate opencloud node...")
@@ -72,40 +96,150 @@ func InitNode(isNode bool, isIndexer bool) (*Node, error) {
			panic(err) // can't run your node without a propalgation pubsub, of state of node.
		}
		node.PS = ps
		// buildRecord returns a fresh signed PeerRecord as JSON, embedded in each
		// heartbeat so the receiving indexer can republish it to the DHT directly.
		// peerRecord is nil until claimInfo runs, so the first ~20s heartbeats carry
		// no record — that's fine, claimInfo publishes once synchronously at startup.
		buildRecord := func() json.RawMessage {
			if node.peerRecord == nil {
				return nil
			}
			priv, err := tools.LoadKeyFromFilePrivate()
			if err != nil {
				return nil
			}
			fresh := *node.peerRecord
			ttl := time.Duration(fresh.TTLSeconds) * time.Second
			if ttl <= 0 {
				ttl = indexer.DefaultTTLSeconds * time.Second
			}
			fresh.UpdateDate = time.Now().UTC()
			fresh.PeerRecordPayload.ExpiryDate = time.Now().UTC().Add(ttl)
			payload, _ := json.Marshal(fresh.PeerRecordPayload)
			fresh.Signature, err = priv.Sign(payload)
			if err != nil {
				return nil
			}
			b, _ := json.Marshal(fresh)
			return json.RawMessage(b)
		}
		logger.Info().Msg("connect to indexers...")
		common.ConnectToIndexers(node.Host, 0, 5, node.PeerID) // TODO : make var to change how many indexers are allowed.
		common.ConnectToIndexers(node.Host, conf.GetConfig().MinIndexer, conf.GetConfig().MaxIndexer, buildRecord)
		logger.Info().Msg("claims my node...")
		if _, err := node.claimInfo(conf.GetConfig().Name, conf.GetConfig().Hostname); err != nil {
			panic(err)
		}
		logger.Info().Msg("subscribe to decentralized search flow...")
		logger.Info().Msg("run garbage collector...")
		node.StartGC(30 * time.Second)

		if node.StreamService, err = stream.InitStream(context.Background(), node.Host, node.PeerID, 1000, node); err != nil {
			panic(err)
		}
		node.StreamService.IsPeerKnown = func(pid pp.ID) bool {
			// 1. Local DB: known peer (handles blacklist).
			access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
			results := access.Search(&dbs.Filters{
				And: map[string][]dbs.Filter{
					"peer_id": {{Operator: dbs.EQUAL.String(), Value: pid.String()}},
				},
			}, pid.String(), false, 0, 1)
			for _, item := range results.Data {
				p, ok := item.(*peer.Peer)
				if !ok || p.PeerID != pid.String() {
					continue
				}
				return p.Relation != peer.BLACKLIST
			}
			// 2. Ask a connected indexer → DHT lookup by peer_id.
			for _, addr := range common.Indexers.GetAddrs() {
				ctx, cancel := context.WithTimeout(context.Background(), 4*time.Second)
				s, err := h.NewStream(ctx, addr.Info.ID, common.ProtocolGet)
				cancel()
				if err != nil {
					continue
				}
				json.NewEncoder(s).Encode(indexer.GetValue{PeerID: pid.String()})
				var resp indexer.GetResponse
				json.NewDecoder(s).Decode(&resp)
				s.Reset()
				return resp.Found
			}
			return false
		}

		if node.PubSubService, err = pubsub.InitPubSub(context.Background(), node.Host, node.PS, node, node.StreamService); err != nil {
			panic(err)
		}
		f := func(ctx context.Context, evt common.Event, topic string) {
			if p, err := node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 {
				node.StreamService.SendResponse(p[0], &evt)
			m := map[string]interface{}{}
			err := json.Unmarshal(evt.Payload, &m)
			if err != nil || evt.From == node.PeerID.String() {
				return
			}
			fmt.Println("PUBSUB SendResponse bef peerrece")
			if p, err := node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 && m["search"] != nil {
				fmt.Println("PUBSUB SendResponse af peerrece", m)
				node.StreamService.SendResponse(p[0], &evt, fmt.Sprintf("%v", m["search"]))
			}
		}
		node.SubscribeToSearch(node.PS, &f)
		node.AllowInbound = func(remotePeer pp.ID, isNew bool) error {
			if isNew {
				// DB blacklist check: blocks reconnection after EvictPeer + blacklist.
				if !node.isPeerKnown(remotePeer) {
					return errors.New("peer is blacklisted or unknown")
				}
				if !node.ConnGuard.Allow() {
					return errors.New("connection rate limit exceeded, retry later")
				}
			}
			return nil
		}
		logger.Info().Msg("subscribe to decentralized search flow...")
		go node.SubscribeToSearch(node.PS, &f)
		logger.Info().Msg("connect to NATS")
		go ListenNATS(node)
		logger.Info().Msg("Node is actually running.")
	}
	if isIndexer {
		logger.Info().Msg("generate opencloud indexer...")
		node.IndexerService = indexer.NewIndexerService(node.Host, ps, 5)
		node.IndexerService = indexer.NewIndexerService(node.Host, ps, 500)
	}
	logger.Info().Msg("connect to NATS")
	ListenNATS(*node)
	logger.Info().Msg("Node is actually running.")
	return node, nil
}

// isPeerKnown is the stream-level gate: returns true if pid is allowed.
// Check order (fast → slow):
//  1. In-memory stream records — currently heartbeating to this indexer.
//  2. Local DB by peer_id — known peer, blacklist enforced here.
//  3. DHT /pid/{peerID} → /node/{DID} — registered on any indexer.
//
// ProtocolHeartbeat and ProtocolPublish handlers do NOT call this — they are
// the streams through which a node first makes itself known.
func (d *Node) isPeerKnown(pid pp.ID) bool {
	// 1. Fast path: active heartbeat session.
	d.StreamMU.RLock()
	_, active := d.StreamRecords[common.ProtocolHeartbeat][pid]
	d.StreamMU.RUnlock()
	if active {
		return true
	}
	// 2. Local DB: known peer (handles blacklist).
	access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
	results := access.Search(&dbs.Filters{
		And: map[string][]dbs.Filter{
			"peer_id": {{Operator: dbs.EQUAL.String(), Value: pid.String()}},
		},
	}, pid.String(), false, 0, 1)
	for _, item := range results.Data {
		p, ok := item.(*peer.Peer)
		if !ok || p.PeerID != pid.String() {
			continue
		}
		return p.Relation != peer.BLACKLIST
	}
	return true
}
|
||||
|
||||
func (d *Node) Close() {
|
||||
if d.isIndexer && d.IndexerService != nil {
|
||||
d.IndexerService.Close()
|
||||
@@ -118,30 +252,25 @@ func (d *Node) Close() {
|
||||
func (d *Node) publishPeerRecord(
|
||||
rec *indexer.PeerRecord,
|
||||
) error {
|
||||
priv, err := common.LoadKeyFromFilePrivate() // your node private key
|
||||
priv, err := tools.LoadKeyFromFilePrivate() // your node private key
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if common.StreamIndexers[common.ProtocolPublish] == nil {
|
||||
return errors.New("no protocol Publish is set up on the node")
|
||||
}
|
||||
for _, ad := range common.StaticIndexers {
|
||||
if common.StreamIndexers[common.ProtocolPublish][ad.ID] == nil {
|
||||
return errors.New("no protocol Publish for peer " + ad.ID.String() + " is set up on the node")
|
||||
}
|
||||
stream := common.StreamIndexers[common.ProtocolPublish][ad.ID]
|
||||
base := indexer.PeerRecord{
|
||||
Name: rec.Name,
|
||||
DID: rec.DID,
|
||||
PubKey: rec.PubKey,
|
||||
ExpiryDate: time.Now().UTC().Add(2 * time.Minute),
|
||||
}
|
||||
payload, _ := json.Marshal(base)
|
||||
hash := sha256.Sum256(payload)
|
||||
|
||||
rec.ExpiryDate = base.ExpiryDate
|
||||
rec.Signature, err = priv.Sign(hash[:])
|
||||
rec.TTL = 2
|
||||
for _, ad := range common.Indexers.GetAddrs() {
|
||||
var err error
|
||||
if common.Indexers.Streams, err = common.TempStream(d.Host, *ad.Info, common.ProtocolPublish, "", common.Indexers.Streams, map[protocol.ID]*common.ProtocolInfo{},
|
||||
&common.Indexers.MuStream); err != nil {
|
||||
continue
|
||||
}
|
||||
stream := common.Indexers.Streams.GetPerID(common.ProtocolPublish, ad.Info.ID)
|
||||
ttl := time.Duration(rec.TTLSeconds) * time.Second
|
||||
if ttl <= 0 {
|
||||
ttl = indexer.DefaultTTLSeconds * time.Second
|
||||
}
|
||||
rec.ExpiryDate = time.Now().UTC().Add(ttl)
|
||||
payload, _ := json.Marshal(rec.PeerRecordPayload)
|
||||
rec.Signature, err = priv.Sign(payload)
|
||||
if err := json.NewEncoder(stream.Stream).Encode(&rec); err != nil { // then publish on stream
|
||||
return err
|
||||
}
|
||||
@@ -149,45 +278,118 @@ func (d *Node) publishPeerRecord(
|
||||
return nil
|
||||
}
|
||||
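The new version signs the marshalled PeerRecordPayload directly with the node's private key. For reference, a minimal sketch of the matching verification an indexer could perform, assuming PeerRecord exposes the PeerRecordPayload, PubKey, and Signature fields seen in this diff (the helper name is illustrative):

func verifyPeerRecord(rec *indexer.PeerRecord) (bool, error) {
	pub, err := crypto.UnmarshalPublicKey(rec.PubKey) // libp2p core/crypto
	if err != nil {
		return false, err
	}
	payload, err := json.Marshal(rec.PeerRecordPayload)
	if err != nil {
		return false, err
	}
	// Verify against the exact bytes that publishPeerRecord signed.
	return pub.Verify(payload, rec.Signature)
}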

// SearchPeerRecord starts a distributed peer search via ProtocolSearchPeer.
// A new call for the same userKey cancels any previous search.
// Results are pushed to onResult as they arrive; the function returns when
// the stream closes (idle timeout, explicit cancel, or indexer unreachable).
func (d *Node) SearchPeerRecord(userKey, needle string, onResult func(indexer.PeerRecord)) {
	logger := oclib.GetLogger()

	idleTimeout := common.SearchIdleTimeout()
	ctx, cancel := context.WithCancel(context.Background())
	// Register cancels any previous search for userKey and starts the idle timer.
	// The composite key doubles as QueryID so the indexer echoes it back.
	searchKey := d.peerSearches.Register(userKey, cancel, idleTimeout)
	defer d.peerSearches.Cancel(searchKey)

	req := common.SearchPeerRequest{QueryID: searchKey}
	if pid, err := pp.Decode(needle); err == nil {
		req.PeerID = pid.String()
	} else if _, err := uuid.Parse(needle); err == nil {
		req.DID = needle
	} else {
		req.Name = needle
	}
	fmt.Println("PROPALGATE PEER", needle, common.Indexers.GetAddrs())
	for _, ad := range common.Indexers.GetAddrs() {
		if ad.Info == nil {
			continue
		}
		dialCtx, dialCancel := context.WithTimeout(ctx, 5*time.Second)
		s, err := d.Host.NewStream(dialCtx, ad.Info.ID, common.ProtocolSearchPeer)
		dialCancel()
		if err != nil {
			continue
		}
		if err := json.NewEncoder(s).Encode(req); err != nil {
			s.Reset()
			continue
		}
		// Interrupt the blocking Decode as soon as the context is cancelled
		// (idle timer, explicit PB_CLOSE_SEARCH, or replacement search).
		go func() {
			<-ctx.Done()
			s.SetReadDeadline(time.Now())
		}()
		seen := map[string]struct{}{}
		dec := json.NewDecoder(s)
		for {
			var result indexer.SearchPeerResult
			if err := dec.Decode(&result); err != nil {
				break
			}
			if result.QueryID != searchKey || !d.peerSearches.IsActive(searchKey) {
				break
			}
			d.peerSearches.ResetIdle(searchKey)
			for _, hit := range result.Records {
				key := hit.PeerID
				if key == "" {
					key = hit.DID
				}
				if _, already := seen[key]; already {
					continue
				}
				seen[key] = struct{}{}
				onResult(hit)
			}
		}
		s.Reset()
		return
	}
	logger.Warn().Str("user", userKey).Msg("[search] no reachable indexer for peer search")
}
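A usage sketch for the API above; the user key, needle value, and channel buffering are illustrative:

results := make(chan indexer.PeerRecord, 16)
go d.SearchPeerRecord("user-42", "alice", func(rec indexer.PeerRecord) {
	select {
	case results <- rec: // hits are already deduplicated upstream
	default: // drop if the consumer lags
	}
})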

func (d *Node) GetPeerRecord(
	ctx context.Context,
	key string,
	pidOrdid string,
) ([]*peer.Peer, error) {
	var err error
	var info map[string]indexer.PeerRecord
	if common.StreamIndexers[common.ProtocolPublish] == nil {
		return nil, errors.New("no protocol Publish is set up on the node")
	// Build the GetValue request: if pidOrdid is neither a UUID DID nor a libp2p
	// PeerID, treat it as a human-readable name and let the indexer resolve it.
	// GetPeerRecord resolves by PeerID or DID only.
	// Name-based search goes through SearchPeerRecord (ProtocolSearchPeer).
	getReq := indexer.GetValue{Key: pidOrdid}
	if pidR, pidErr := pp.Decode(pidOrdid); pidErr == nil {
		getReq.PeerID = pidR.String()
		getReq.Key = ""
	}
	for _, ad := range common.StaticIndexers {
		if common.StreamIndexers[common.ProtocolPublish][ad.ID] == nil {
			return nil, errors.New("no protocol Publish for peer " + ad.ID.String() + " is set up on the node")
	for _, ad := range common.Indexers.GetAddrs() {
		if common.Indexers.Streams, err = common.TempStream(d.Host, *ad.Info, common.ProtocolGet, "",
			common.Indexers.Streams, map[protocol.ID]*common.ProtocolInfo{}, &common.Indexers.MuStream); err != nil {
			continue
		}
		stream := common.StreamIndexers[common.ProtocolPublish][ad.ID]
		if err := json.NewEncoder(stream.Stream).Encode(indexer.GetValue{Key: key}); err != nil {
			return nil, err
		stream := common.Indexers.Streams.GetPerID(common.ProtocolGet, ad.Info.ID)
		if err := json.NewEncoder(stream.Stream).Encode(getReq); err != nil {
			continue
		}

		for {
			var resp indexer.GetResponse
			if err := json.NewDecoder(stream.Stream).Decode(&resp); err != nil {
				return nil, err
			}
			if resp.Found {
				info = resp.Records
				break
			}
		var resp indexer.GetResponse
		if err := json.NewDecoder(stream.Stream).Decode(&resp); err != nil {
			continue
		}
		if resp.Found {
			info = resp.Records
		}
		break
	}
	var ps []*peer.Peer
	for _, pr := range info {
		if pk, err := pr.Verify(); err != nil {
			return nil, err
		} else if ok, p, err := pr.ExtractPeer(d.PeerID.String(), key, pk); err != nil {
		} else if _, p, err := pr.ExtractPeer(d.PeerID.String(), pr.PeerID, pk); err != nil {
			return nil, err
		} else {
			if ok {
				d.publishPeerRecord(&pr)
			}
			ps = append(ps, p)
		}
	}
@@ -202,12 +404,21 @@ func (d *Node) claimInfo(
	if endPoint == "" {
		return nil, errors.New("no endpoint found for peer")
	}
	peerID := uuid.New().String()
	priv, err := common.LoadKeyFromFilePrivate()
	did := uuid.New().String()

	peers := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).Search(&dbs.Filters{
		And: map[string][]dbs.Filter{ // look up our own record by peer_id
			"peer_id": {{Operator: dbs.EQUAL.String(), Value: d.Host.ID().String()}},
		},
	}, "", false, 0, 1)
	if len(peers.Data) > 0 {
		did = peers.Data[0].GetID() // reuse the existing DID if the peer is already registered
	}
	priv, err := tools.LoadKeyFromFilePrivate()
	if err != nil {
		return nil, err
	}
	pub, err := common.LoadKeyFromFilePublic()
	pub, err := tools.LoadKeyFromFilePublic()
	if err != nil {
		return nil, err
	}
@@ -217,39 +428,118 @@ func (d *Node) claimInfo(
	}

	now := time.Now().UTC()
	expiry := now.Add(150 * time.Second)
	pRec := indexer.PeerRecordPayload{
		Name:   name,
		DID:    did, // REAL PEER ID
		PubKey: pubBytes,

		IsNano:     oclib.GetConfig().IsNano,
		TTLSeconds: indexer.DefaultTTLSeconds,
		ExpiryDate: now.Add(indexer.DefaultTTLSeconds * time.Second),
	}
	d.PeerID = d.Host.ID()
	payload, _ := json.Marshal(pRec)

	rec := &indexer.PeerRecord{
		Name:   name,
		DID:    peerID, // REAL PEER ID
		PubKey: pubBytes,
		PeerRecordPayload: pRec,
	}

	rec.PeerID = d.Host.ID().String()
	d.PeerID = d.Host.ID()

	payload, _ := json.Marshal(rec)
	hash := sha256.Sum256(payload)

	rec.Signature, err = priv.Sign(hash[:])
	rec.CreationDate = time.Now().UTC()
	rec.UpdateDate = time.Now().UTC()
	rec.Signature, err = priv.Sign(payload)
	if err != nil {
		return nil, err
	}

	rec.PeerID = d.Host.ID().String()
	rec.APIUrl = endPoint
	rec.StreamAddress = "/ip4/" + conf.GetConfig().Hostname + " /tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + " /p2p/" + rec.PeerID
	rec.StreamAddress = "/ip4/" + conf.GetConfig().Hostname + "/tcp/" + fmt.Sprintf("%v", conf.GetConfig().NodeEndpointPort) + "/p2p/" + rec.PeerID
	rec.NATSAddress = oclib.GetConfig().NATSUrl
	rec.WalletAddress = "my-wallet"
	rec.ExpiryDate = expiry
	rec.Location = location.Geolocate(conf.GetConfig().LocationGranularity)

	if err := d.publishPeerRecord(rec); err != nil {
		return nil, err
	}
	/*if pk, err := rec.Verify(); err != nil {
	fmt.Println("Verify")
	d.peerRecord = rec
	if _, err := rec.Verify(); err != nil {
		return nil, err
	} else {*/
	_, p, err := rec.ExtractPeer(peerID, peerID, pub)
	return p, err
	//}
	} else {
		_, p, err := rec.ExtractPeer(did, did, pub)
		b, err := json.Marshal(p)
		if err != nil {
			return p, err
		}
		go tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
			FromApp:    "oc-discovery",
			Datatype:   tools.PEER,
			Method:     int(tools.CREATE_RESOURCE),
			SearchAttr: "peer_id",
			Payload:    b,
		})
		return p, err
	}
}

// DeleteRecord broadcasts a signed tombstone to all connected indexers, signalling
// that this node is voluntarily leaving the network.
// Each indexer verifies the signature, stores the tombstone in the DHT (replacing
// the live record), and evicts the peer from its active pool.
// After a successful call, d.peerRecord is set to nil.
func (d *Node) DeleteRecord() error {
	if d.peerRecord == nil {
		return errors.New("no peer record to delete")
	}
	priv, err := tools.LoadKeyFromFilePrivate()
	if err != nil {
		return err
	}
	pubBytes, err := crypto.MarshalPublicKey(priv.GetPublic())
	if err != nil {
		return err
	}
	tp := indexer.TombstonePayload{
		DID:       d.peerRecord.DID,
		PeerID:    d.PeerID.String(),
		DeletedAt: time.Now().UTC(),
	}
	payloadBytes, _ := json.Marshal(tp)
	sig, err := priv.Sign(payloadBytes)
	if err != nil {
		return err
	}
	ts := &indexer.TombstoneRecord{
		TombstonePayload: tp,
		PubKey:           pubBytes,
		Tombstone:        true,
		Signature:        sig,
	}
	data, _ := json.Marshal(ts)
	for _, ad := range common.Indexers.GetAddrs() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		s, err := d.Host.NewStream(ctx, ad.Info.ID, common.ProtocolDelete)
		cancel()
		if err != nil {
			continue
		}
		s.SetDeadline(time.Now().Add(5 * time.Second))
		s.Write(data)
		s.Close()
	}
	d.peerRecord = nil
	return nil
}
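A teardown sketch, assuming DeleteRecord is wired to process signals so the tombstone goes out before exit (the signal plumbing needs os, os/signal, and syscall, and is illustrative, not part of this diff):

sigs := make(chan os.Signal, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go func() {
	<-sigs
	if err := node.DeleteRecord(); err != nil {
		oclib.GetLogger().Warn().Err(err).Msg("tombstone broadcast failed")
	}
	node.Close()
	os.Exit(0)
}()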

/*
TODO:
- Booking is a new decentralized flow:
  we check, wait for a response, validate; it goes through discovery, and we relay.
- The shared workspace is a decentralization concern:
  movements are communicated to the sharing peers.
- A share replaces the notion of partnership at the partnering level
  -> sharing a workspace makes the peers temporary partners,
     whether they originally were or not.
  -> they then get the same privileges.
- Admiralty orchestrations work the same way.
  An event then triggers the creation of a service key.

A DBObject must support CRUD with signature verification.
*/

@@ -1,41 +0,0 @@
package pubsub

import (
	"context"
	"oc-discovery/daemons/node/common"

	"cloud.o-forge.io/core/oc-lib/tools"
)

func (ps *PubSubService) handleEvent(ctx context.Context, topicName string, evt *common.Event) error {
	action := ps.getTopicName(topicName)
	if err := ps.handleEventSearch(ctx, evt, action); err != nil {
		return err
	}
	return nil
}

func (ps *PubSubService) handleEventSearch( // only : on partner followings. 3 canals for every partner.
	ctx context.Context,
	evt *common.Event,
	action tools.PubSubAction,
) error {
	if !(action == tools.PB_SEARCH_RESPONSE || action == tools.PB_SEARCH) {
		return nil
	}
	if p, err := ps.Node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 { // peerFrom is Unique
		if err := evt.Verify(p[0]); err != nil {
			return err
		}
		switch action {
		case tools.PB_SEARCH: // when someone asks for a search.
			if err := ps.StreamService.SendResponse(p[0], evt); err != nil {
				return err
			}

		default:
			return nil
		}
	}
	return nil
}
@@ -4,64 +4,71 @@ import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"oc-discovery/conf"
	"oc-discovery/daemons/node/common"
	"oc-discovery/daemons/node/stream"
	"oc-discovery/models"
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/tools"
)

func (ps *PubSubService) SearchPublishEvent(
	ctx context.Context, dt *tools.DataType, typ string, user string, search string) error {
	ctx context.Context, dt *tools.DataType, typ string, user string, groups []string, search string) error {
	b, err := json.Marshal(map[string]string{"search": search})
	if err != nil {
		return err
	}
	switch typ {
	case "known": // define Search Strategy
		return ps.StreamService.SearchKnownPublishEvent(dt, user, search) //if partners focus only them*/
		return ps.StreamService.PublishesCommon(dt, user, groups, nil, b, stream.ProtocolSearchResource) //if partners focus only them*/
	case "partner": // define Search Strategy
		return ps.StreamService.SearchPartnersPublishEvent(dt, user, search) //if partners focus only them*/
		return ps.StreamService.PublishesCommon(dt, user, groups, &dbs.Filters{ // filter by like name, short_description, description, owner, url if no filters are provided
			Or: map[string][]dbs.Filter{
				"relation": {{Operator: dbs.EQUAL.String(), Value: peer.PARTNER}},
				"is_nano":  {{Operator: dbs.EQUAL.String(), Value: true}},
			},
		}, b, stream.ProtocolSearchResource)
	case "all": // Gossip PubSub
		b, err := json.Marshal(map[string]string{"search": search})
		if err != nil {
			return err
		}
		return ps.searchPublishEvent(ctx, dt, user, b)
		idleTimeout := func() time.Duration {
			if t := conf.GetConfig().SearchTimeout; t > 0 {
				return time.Duration(t) * time.Second
			}
			return 5 * time.Second
		}()
		searchCtx, cancel := context.WithCancel(ctx)
		// Register cancels any previous search for this user and starts the idle timer.
		// The returned composite key is used as User in the GossipSub event so that
		// remote peers echo it back unchanged, allowing IsActive to validate results.
		searchKey := ps.StreamService.ResourceSearches.Register(user, cancel, idleTimeout)
		fmt.Println("PUBLISH ON PUBSUB", common.TopicPubSubSearch, searchKey)
		return ps.publishEvent(searchCtx, dt, tools.PB_SEARCH, common.TopicPubSubSearch, searchKey, b)
	default:
		return errors.New("no search type found")
	}
}

func (ps *PubSubService) searchPublishEvent(
	ctx context.Context, dt *tools.DataType, user string, payload []byte) error {
	id, err := oclib.GenerateNodeID()
	if err != nil {
		return err
	}
	if err := ps.subscribeEvents(ctx, dt, tools.PB_SEARCH_RESPONSE, id, 60); err != nil { // TODO: capture the event!
		return err
	}
	return ps.publishEvent(ctx, dt, tools.PB_SEARCH, user, "", payload, false)
}

func (ps *PubSubService) publishEvent(
	ctx context.Context, dt *tools.DataType, action tools.PubSubAction, user string,
	peerID string, payload []byte, chanNamedByDt bool,
	ctx context.Context, dt *tools.DataType, action tools.PubSubAction, topicName string, user string, payload []byte,
) error {
	name := action.String() + "#" + peerID
	if chanNamedByDt && dt != nil { // if a datatype is specified then: app.action.datatype#peerID
		name = action.String() + "." + (*dt).String() + "#" + peerID
	}

	from, err := oclib.GenerateNodeID()
	priv, err := tools.LoadKeyFromFilePrivate()
	if err != nil {
		return err
	}
	priv, err := common.LoadKeyFromFilePrivate()
	if err != nil {
		return err
	}
	msg, _ := json.Marshal(models.NewEvent(name, from, dt, user, payload, priv))
	topic, err := ps.PS.Join(name)
	if err != nil {
		return err
	msg, _ := json.Marshal(models.NewEvent(action.String(), ps.Host.ID().String(), dt, user, payload, priv))
	topic := ps.Node.GetPubSub(topicName)
	if topic == nil {
		topic, err = ps.PS.Join(topicName)
		if err != nil {
			return err
		}
	}
	return topic.Publish(ctx, msg)
}

@@ -4,17 +4,13 @@ import (
	"context"
	"oc-discovery/daemons/node/common"
	"oc-discovery/daemons/node/stream"
	"strings"
	"sync"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/tools"
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"github.com/libp2p/go-libp2p/core/host"
)

type PubSubService struct {
	*common.LongLivedPubSubService
	Node          common.DiscoveryPeer
	Host          host.Host
	PS            *pubsub.PubSub
@@ -24,24 +20,12 @@ type PubSubService struct {
}

func InitPubSub(ctx context.Context, h host.Host, ps *pubsub.PubSub, node common.DiscoveryPeer, streamService *stream.StreamService) (*PubSubService, error) {
	service := &PubSubService{
		LongLivedPubSubService: common.NewLongLivedPubSubService(h),
		Node:          node,
		StreamService: streamService,
		PS:            ps,
	}
	logger := oclib.GetLogger()
	logger.Info().Msg("subscribe to events...")
	service.initSubscribeEvents(ctx)
	return service, nil
}

func (ps *PubSubService) getTopicName(topicName string) tools.PubSubAction {
	ns := strings.Split(topicName, ".")
	if len(ns) > 0 {
		return tools.GetActionString(ns[0])
	}
	return tools.NONE
	return &PubSubService{
		Host:          h,
		Node:          node,
		StreamService: streamService,
		PS:            ps,
	}, nil
}

func (ix *PubSubService) Close() {

@@ -1,45 +0,0 @@
package pubsub

import (
	"context"
	"oc-discovery/daemons/node/common"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/tools"
)

func (ps *PubSubService) initSubscribeEvents(ctx context.Context) error {
	if err := ps.subscribeEvents(ctx, nil, tools.PB_SEARCH, "", -1); err != nil {
		return err
	}
	return nil
}

// generic function to subscribe to a DHT flow of events
func (ps *PubSubService) subscribeEvents(
	ctx context.Context, dt *tools.DataType, action tools.PubSubAction, peerID string, timeout int,
) error {
	logger := oclib.GetLogger()
	// define a name app.action#peerID
	name := action.String() + "#" + peerID
	if dt != nil { // if a datatype is specified then: app.action.datatype#peerID
		name = action.String() + "." + (*dt).String() + "#" + peerID
	}
	f := func(ctx context.Context, evt common.Event, topicName string) {
		if p, err := ps.Node.GetPeerRecord(ctx, evt.From); err == nil && len(p) > 0 {
			if err := ps.processEvent(ctx, p[0], &evt, topicName); err != nil {
				logger.Err(err)
			}
		}
	}
	return common.SubscribeEvents(ps.LongLivedPubSubService, ctx, name, -1, f)
}

func (ps *PubSubService) processEvent(
	ctx context.Context, p *peer.Peer, event *common.Event, topicName string) error {
	if err := event.Verify(p); err != nil {
		return err
	}
	return ps.handleEvent(ctx, topicName, event)
}
362
daemons/node/stream/dnt_cache.go
Normal file
@@ -0,0 +1,362 @@
package stream

// dnt_cache.go — Disconnection Network Tolerance cache for outbound stream requests.
//
// When a stream write fails because the remote peer is unreachable, the request
// is saved here and retried on the next tick. Two levels are defined:
//
//   - dntCritical : retry indefinitely (create / update / delete resource).
//   - dntModerate : up to dntMaxModerateRetries retries, then abandon.
//
// Pubsub messages and search streams are explicitly excluded.
// Streams initiated from the indexer side are never enqueued here.
//
// # Crash-resilient persistence
//
// Critical entries are written to an encrypted file (AES-256-GCM) so they
// survive a node crash/restart. The AES key is derived deterministically from
// the node's Ed25519 private key via HKDF-SHA256 — no extra secret to manage.
// Moderate entries are intentionally not persisted: their retry budget is small
// enough that re-loading them after a restart would be misleading.

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"encoding/json"
	"io"
	"os"
	"path/filepath"
	"sync"
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/tools"
	"golang.org/x/crypto/hkdf"

	"oc-discovery/conf"

	pp "github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

type dntLevel int

const (
	dntCritical dntLevel = iota // retry until the message is delivered
	dntModerate                 // retry up to dntMaxModerateRetries times
)

const dntMaxModerateRetries = 3
const dntRetryInterval = 15 * time.Second

// dntProtocols maps each stream protocol to its DNT level.
// Protocols absent from this map receive no caching (e.g. ProtocolSearchResource).
var dntProtocols = map[protocol.ID]dntLevel{
	// Critical — data mutations that must eventually be delivered.
	ProtocolCreateResource: dntCritical,
	ProtocolUpdateResource: dntCritical,
	ProtocolDeleteResource: dntCritical,
	// Moderate — confirmations / config / planner: 3 retries before abandon.
	ProtocolVerifyResource:          dntModerate,
	ProtocolSendPlanner:             dntModerate,
	ProtocolConsidersResource:       dntModerate,
	ProtocolMinioConfigResource:     dntModerate,
	ProtocolAdmiraltyConfigResource: dntModerate,
}

// dntEntryJSON is the on-disk representation of a dntEntry.
// pp.AddrInfo and protocol.ID don't have built-in JSON tags so we flatten them.
type dntEntryJSON struct {
	DID     string          `json:"did"`
	Addr    pp.AddrInfo     `json:"addr"`
	DT      *tools.DataType `json:"dt,omitempty"`
	User    string          `json:"user"`
	Payload []byte          `json:"payload"`
	Proto   protocol.ID     `json:"proto"`
	Retries int             `json:"retries"`
	AddedAt time.Time       `json:"added_at"`
}

type dntEntry struct {
	did     string
	addr    pp.AddrInfo
	dt      *tools.DataType
	user    string
	payload []byte
	proto   protocol.ID
	retries int
	addedAt time.Time
}

func (e *dntEntry) toJSON() dntEntryJSON {
	return dntEntryJSON{
		DID:     e.did,
		Addr:    e.addr,
		DT:      e.dt,
		User:    e.user,
		Payload: e.payload,
		Proto:   e.proto,
		Retries: e.retries,
		AddedAt: e.addedAt,
	}
}

func entryFromJSON(j dntEntryJSON) *dntEntry {
	return &dntEntry{
		did:     j.DID,
		addr:    j.Addr,
		dt:      j.DT,
		user:    j.User,
		payload: j.Payload,
		proto:   j.Proto,
		retries: j.Retries,
		addedAt: j.AddedAt,
	}
}

type dntCache struct {
	mu      sync.Mutex
	entries []*dntEntry
	// aesKey is the derived AES-256 key used for on-disk encryption.
	// Nil when key derivation failed: persistence is disabled but the in-memory
	// cache continues to function normally.
	aesKey []byte
}

// newDNTCache initialises the cache, derives the encryption key, and restores
// any critical entries that were persisted before the last crash.
func newDNTCache() *dntCache {
	log := oclib.GetLogger()
	c := &dntCache{}
	key, err := deriveDNTKey()
	if err != nil {
		log.Warn().Err(err).Msg("[dnt] key derivation failed — persistence disabled")
	} else {
		c.aesKey = key
		c.loadFromDisk()
	}
	return c
}

// enqueue adds an entry to the cache and persists critical entries to disk.
func (c *dntCache) enqueue(e *dntEntry) {
	c.mu.Lock()
	c.entries = append(c.entries, e)
	c.mu.Unlock()
	if dntProtocols[e.proto] == dntCritical {
		go c.persistToDisk()
	}
}

// drain atomically removes and returns all current entries.
func (c *dntCache) drain() []*dntEntry {
	c.mu.Lock()
	defer c.mu.Unlock()
	out := c.entries
	c.entries = nil
	return out
}

// requeue puts entries back at the head of the list, preserving any new
// entries added while the retry loop was running.
func (c *dntCache) requeue(entries []*dntEntry) {
	if len(entries) == 0 {
		return
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries = append(entries, c.entries...)
}

// ── Persistence ──────────────────────────────────────────────────────────────

// dntCachePath returns the path of the on-disk cache file, placed next to the
// node's private key so it lives on the same persistent volume.
func dntCachePath() string {
	return filepath.Join(filepath.Dir(conf.GetConfig().PrivateKeyPath), "dnt_cache.bin")
}

// deriveDNTKey derives a 32-byte AES key from the node's Ed25519 private key
// using HKDF-SHA256. The derivation is deterministic: the same key is always
// produced from the same private key, so no symmetric secret needs storing.
func deriveDNTKey() ([]byte, error) {
	priv, err := tools.LoadKeyFromFilePrivate()
	if err != nil {
		return nil, err
	}
	// Raw() on a libp2p Ed25519 private key returns the 64-byte representation
	// (32-byte seed || 32-byte public key). We use the full 64 bytes as IKM.
	raw, err := priv.Raw()
	if err != nil {
		return nil, err
	}
	reader := hkdf.New(sha256.New, raw, nil, []byte("oc-discovery/dnt-cache/v1"))
	key := make([]byte, 32)
	if _, err := io.ReadFull(reader, key); err != nil {
		return nil, err
	}
	return key, nil
}
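Determinism is the property the persistence layer relies on. A quick test sketch (needs "bytes" and "testing"; it skips when the node key file is absent, since loading the key is an external precondition):

func TestDeriveDNTKeyDeterministic(t *testing.T) {
	k1, err := deriveDNTKey()
	if err != nil {
		t.Skip("node key file not available:", err)
	}
	k2, err := deriveDNTKey()
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(k1, k2) {
		t.Fatal("HKDF output differs across calls; a persisted cache would become unreadable")
	}
}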

// persistToDisk encrypts all current critical entries and writes them to disk.
// Non-critical entries are deliberately excluded — they are not worth restoring
// after a restart given their limited retry budget.
func (c *dntCache) persistToDisk() {
	if c.aesKey == nil {
		return
	}
	log := oclib.GetLogger()
	c.mu.Lock()
	var toSave []dntEntryJSON
	for _, e := range c.entries {
		if dntProtocols[e.proto] == dntCritical {
			toSave = append(toSave, e.toJSON())
		}
	}
	c.mu.Unlock()

	plaintext, err := json.Marshal(toSave)
	if err != nil {
		return
	}

	block, err := aes.NewCipher(c.aesKey)
	if err != nil {
		return
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return
	}
	ciphertext := gcm.Seal(nonce, nonce, plaintext, nil)

	path := dntCachePath()
	tmp := path + ".tmp"
	if err := os.WriteFile(tmp, ciphertext, 0600); err != nil {
		log.Warn().Err(err).Msg("[dnt] failed to write cache file")
		return
	}
	if err := os.Rename(tmp, path); err != nil {
		log.Warn().Err(err).Msg("[dnt] failed to rename cache file")
		_ = os.Remove(tmp)
	}
}

// loadFromDisk decrypts the on-disk cache and re-enqueues only critical entries.
// Errors (missing file, decryption failure) are non-fatal: the cache simply
// starts empty, which is safe.
func (c *dntCache) loadFromDisk() {
	if c.aesKey == nil {
		return
	}
	log := oclib.GetLogger()
	path := dntCachePath()
	data, err := os.ReadFile(path)
	if err != nil {
		if !os.IsNotExist(err) {
			log.Warn().Err(err).Msg("[dnt] failed to read cache file")
		}
		return
	}

	block, err := aes.NewCipher(c.aesKey)
	if err != nil {
		return
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return
	}
	if len(data) < gcm.NonceSize() {
		log.Warn().Msg("[dnt] cache file too short, ignoring")
		return
	}
	nonce, ciphertext := data[:gcm.NonceSize()], data[gcm.NonceSize():]
	plaintext, err := gcm.Open(nil, nonce, ciphertext, nil)
	if err != nil {
		log.Warn().Err(err).Msg("[dnt] cache file decryption failed (key mismatch?), ignoring")
		return
	}

	var saved []dntEntryJSON
	if err := json.Unmarshal(plaintext, &saved); err != nil {
		log.Warn().Err(err).Msg("[dnt] cache file unmarshal failed, ignoring")
		return
	}

	count := 0
	for _, j := range saved {
		// Only restore critical entries — moderate entries are intentionally
		// not persisted, but this guard defends against format changes.
		if dntProtocols[j.Proto] != dntCritical {
			continue
		}
		c.entries = append(c.entries, entryFromJSON(j))
		count++
	}
	if count > 0 {
		log.Info().Int("count", count).Msg("[dnt] restored critical entries from disk")
	}
}

// ── Retry loop ────────────────────────────────────────────────────────────────

// startDNTLoop runs the background retry goroutine. Call once after init.
func (s *StreamService) startDNTLoop() {
	logger := oclib.GetLogger()
	ticker := time.NewTicker(dntRetryInterval)
	defer ticker.Stop()
	for range ticker.C {
		entries := s.dnt.drain()
		if len(entries) == 0 {
			continue
		}
		var keep []*dntEntry
		for _, e := range entries {
			_, err := s.write(e.did, &e.addr, e.dt, e.user, e.payload, e.proto)
			if err == nil {
				level := dntProtocols[e.proto]
				if level == dntCritical {
					logger.Info().
						Str("proto", string(e.proto)).
						Str("peer", e.did).
						Msg("[dnt] critical message delivered after retry")
				} else {
					logger.Info().
						Str("proto", string(e.proto)).
						Str("peer", e.did).
						Int("retries", e.retries).
						Msg("[dnt] moderate message delivered after retry")
				}
				continue
			}
			level := dntProtocols[e.proto]
			switch level {
			case dntCritical:
				keep = append(keep, e)
			case dntModerate:
				e.retries++
				if e.retries < dntMaxModerateRetries {
					keep = append(keep, e)
				} else {
					logger.Warn().
						Str("proto", string(e.proto)).
						Str("peer", e.did).
						Int("retries", e.retries).
						Msg("[dnt] moderate message abandoned after max retries")
				}
			}
		}
		s.dnt.requeue(keep)
		// Persist after each tick so the on-disk file reflects the current
		// state (entries delivered are removed, new ones from concurrent
		// enqueues are included).
		go s.dnt.persistToDisk()
	}
}
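For context, a sketch of the producer side: how a failed write might feed the cache, mirroring the fields the retry loop reads above (the call site itself is an assumption, not shown in this diff):

if _, err := s.write(did, addr, dt, user, payload, proto); err != nil {
	if _, tracked := dntProtocols[proto]; tracked { // pubsub/search protocols stay uncached
		s.dnt.enqueue(&dntEntry{
			did: did, addr: *addr, dt: dt, user: user,
			payload: payload, proto: proto, addedAt: time.Now().UTC(),
		})
	}
}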
@@ -2,119 +2,229 @@ package stream

import (
	"context"
	"crypto/subtle"
	"encoding/json"
	"errors"
	"fmt"
	"oc-discovery/daemons/node/common"
	"strings"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/models/booking/planner"
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/models/resources"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/libp2p/go-libp2p/core/network"
)

func (ps *StreamService) getTopicName(topicName string) tools.PubSubAction {
	ns := strings.Split(topicName, ".")
	if len(ns) > 0 {
		return tools.GetActionString(ns[0])
	}
	return tools.NONE
type Verify struct {
	IsVerified bool `json:"is_verified"`
}

func (ps *StreamService) handleEvent(topicName string, evt *common.Event) error {
	action := ps.getTopicName(topicName)
	if err := ps.handleEventFromPartner(evt, action); err != nil {
		return err
func (ps *StreamService) handleEvent(protocol string, evt *common.Event, s network.Stream) error {
	fmt.Println("handleEvent", protocol)
	// Heartbeat received on an outgoing ProtocolObserve stream.
	if protocol == ProtocolObserve {
		return ps.handleIncomingObserve(s)
	}
	if action == tools.PB_SEARCH_RESPONSE {
	if protocol == observeHBEventType {
		return ps.handleObserveHeartbeat(evt)
	}

	ps.handleEventFromPartner(evt, protocol)
	/*if protocol == ProtocolVerifyResource {
		if evt.DataType == -1 {
			tools.NewNATSCaller().SetNATSPub(tools.VERIFY_RESOURCE, tools.NATSResponse{
				FromApp: "oc-discovery",
				Method:  int(tools.VERIFY_RESOURCE),
				Payload: evt.Payload,
			})
		} else if err := ps.verifyResponse(evt); err != nil {
			return err
		}
	}*/
	if protocol == ProtocolSendPlanner {
		fmt.Println("sendPlanner", evt)
		if err := ps.sendPlanner(evt); err != nil {
			return err
		}
	}
	if protocol == ProtocolSearchResource && evt.DataType > -1 {
		if err := ps.retrieveResponse(evt); err != nil {
			return err
		}
	}
	if protocol == ProtocolConsidersResource {
		if err := ps.pass(evt, tools.CONSIDERS_EVENT); err != nil {
			return err
		}
	}
	if protocol == ProtocolAdmiraltyConfigResource {
		if err := ps.pass(evt, tools.ADMIRALTY_CONFIG_EVENT); err != nil {
			return err
		}
	}
	if protocol == ProtocolMinioConfigResource {
		if err := ps.pass(evt, tools.MINIO_CONFIG_EVENT); err != nil {
			return err
		}
	}
	return errors.New("no authorized action available: " + protocol)
}

func (abs *StreamService) verifyResponse(event *common.Event) error { //
	res, err := resources.ToResource(int(event.DataType), event.Payload)
	if err != nil || res == nil {
		return nil
	}
	verify := Verify{
		IsVerified: false,
	}
	access := oclib.NewRequestAdmin(oclib.LibDataEnum(event.DataType), nil)
	data := access.LoadOne(res.GetID())
	if data.Err == "" && data.Data != nil {
		if b, err := json.Marshal(data.Data); err == nil {
			if res2, err := resources.ToResource(int(event.DataType), b); err == nil {
				verify.IsVerified = subtle.ConstantTimeCompare(res.GetSignature(), res2.GetSignature()) == 1
			}
		}
	}
	if b, err := json.Marshal(verify); err == nil {
		abs.PublishCommon(nil, event.User, event.Groups, event.From, ProtocolVerifyResource, b)
	}
	return nil
}

func (abs *StreamService) sendPlanner(event *common.Event) error { //
	fmt.Println("sendPlanner", len(event.Payload))
	if len(event.Payload) == 0 {
		if plan, err := planner.GenerateShallow(&tools.APIRequest{Admin: true}); err == nil {
			if b, err := json.Marshal(plan); err == nil {
				abs.PublishCommon(nil, event.User, event.Groups, event.From, ProtocolSendPlanner, b)
			} else {
				return err
			}
		} else {
			return err
		}
	} else { // non-empty payload: forward it as a planner execution
		m := map[string]interface{}{}
		if err := json.Unmarshal(event.Payload, &m); err == nil {
			m["peer_id"] = event.From
			if pl, err := json.Marshal(m); err == nil {
				go tools.NewNATSCaller().SetNATSPub(tools.PLANNER_EXECUTION, tools.NATSResponse{
					FromApp:  "oc-discovery",
					Datatype: tools.DataType(oclib.BOOKING),
					Method:   int(tools.PLANNER_EXECUTION),
					Payload:  pl,
				})
			}
		}
	}
	return nil
}

func (abs *StreamService) retrieveResponse(event *common.Event) error { //
	if !abs.ResourceSearches.IsActive(event.User) {
		return nil // search already closed or timed out
	}
	res, err := resources.ToResource(int(event.DataType), event.Payload)
	if err != nil || res == nil {
		return nil
	}
	// A response arrived — reset the idle timeout.
	abs.ResourceSearches.ResetIdle(event.User)
	b, err := json.Marshal(res.Serialize(res))
	go tools.NewNATSCaller().SetNATSPub(tools.CATALOG_SEARCH_EVENT, tools.NATSResponse{
	go tools.NewNATSCaller().SetNATSPub(tools.SEARCH_EVENT, tools.NATSResponse{
		FromApp:  "oc-discovery",
		Datatype: tools.DataType(event.DataType),
		Method:   int(tools.CATALOG_SEARCH_EVENT),
		Method:   int(tools.SEARCH_EVENT),
		Payload:  b,
	})
	return nil
}

func (ps *StreamService) handleEventFromPartner(evt *common.Event, action tools.PubSubAction) error {
	if !(action == tools.PB_CREATE || action == tools.PB_UPDATE || action == tools.PB_DELETE) {
		return nil
	}
	resource, err := resources.ToResource(int(evt.DataType), evt.Payload)
	if err != nil {
		return err
	}
	b, err := json.Marshal(resource)
	if err != nil {
		return err
	}
	switch action {
	case tools.PB_SEARCH:
		access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
		peers := access.Search(nil, evt.From, false)
		if len(peers.Data) > 0 {
			p := peers.Data[0].(*peer.Peer)
			// TODO : something if peer is missing in our side !
			ps.SendResponse(p, evt)
		} else if p, err := ps.Node.GetPeerRecord(context.Background(), evt.From); err == nil && len(p) > 0 { // peer from is peerID
			ps.SendResponse(p[0], evt)
func (abs *StreamService) pass(event *common.Event, method tools.NATSMethod) error { //
	go tools.NewNATSCaller().SetNATSPub(method, tools.NATSResponse{
		FromApp:  "oc-discovery",
		Datatype: tools.DataType(event.DataType),
		Method:   int(method),
		Payload:  event.Payload,
	})
	return nil
}

func (ps *StreamService) handleEventFromPartner(evt *common.Event, protocol string) error {
	switch protocol {
	case ProtocolSearchResource:
		m := map[string]interface{}{}
		err := json.Unmarshal(evt.Payload, &m)
		if err != nil {
			return err
		}
	case tools.PB_CREATE:
	case tools.PB_UPDATE:
		if search, ok := m["search"]; ok {
			access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
			peers := access.Search(&dbs.Filters{
				And: map[string][]dbs.Filter{
					"peer_id": {{Operator: dbs.EQUAL.String(), Value: evt.From}},
				},
			}, evt.From, false, 0, 1)
			if len(peers.Data) > 0 {
				p := peers.Data[0].(*peer.Peer)
				ps.SendResponse(p, evt, fmt.Sprintf("%v", search))
			} else if p, err := ps.Node.GetPeerRecord(context.Background(), evt.From); err == nil && len(p) > 0 { // peer from is peerID
				ps.SendResponse(p[0], evt, fmt.Sprintf("%v", search))
			}
		} else {
			fmt.Println("SEND SEARCH_EVENT SetNATSPub", m)
			go tools.NewNATSCaller().SetNATSPub(tools.SEARCH_EVENT, tools.NATSResponse{
				FromApp:  "oc-discovery",
				Datatype: tools.DataType(evt.DataType),
				Method:   int(tools.SEARCH_EVENT),
				Payload:  evt.Payload,
			})
		}
	case ProtocolCreateResource, ProtocolUpdateResource:
		fmt.Println("RECEIVED Protocol.Update", string(evt.Payload))
		go tools.NewNATSCaller().SetNATSPub(tools.CREATE_RESOURCE, tools.NATSResponse{
			FromApp:  "oc-discovery",
			Datatype: tools.DataType(evt.DataType),
			Method:   int(tools.CREATE_RESOURCE),
			Payload:  b,
			Payload:  evt.Payload,
		})
	case tools.PB_DELETE:
	case ProtocolDeleteResource:
		go tools.NewNATSCaller().SetNATSPub(tools.REMOVE_RESOURCE, tools.NATSResponse{
			FromApp:  "oc-discovery",
			Datatype: tools.DataType(evt.DataType),
			Method:   int(tools.REMOVE_RESOURCE),
			Payload:  b,
			Payload:  evt.Payload,
		})
	default:
		return errors.New("no authorized action available: " + action.String())
		return errors.New("no authorized action available: " + protocol)
	}
	return nil
}

func (abs *StreamService) SendResponse(p *peer.Peer, event *common.Event) error {
	dts := []oclib.LibDataEnum{oclib.LibDataEnum(event.DataType)}
func (abs *StreamService) SendResponse(p *peer.Peer, event *common.Event, search string) error {
	dts := []tools.DataType{tools.DataType(event.DataType)}
	if event.DataType == -1 { // expect all resources
		dts = []oclib.LibDataEnum{oclib.LibDataEnum(oclib.COMPUTE_RESOURCE), oclib.LibDataEnum(oclib.STORAGE_RESOURCE),
			oclib.LibDataEnum(oclib.PROCESSING_RESOURCE), oclib.LibDataEnum(oclib.DATA_RESOURCE), oclib.LibDataEnum(oclib.WORKFLOW_RESOURCE)}
		dts = []tools.DataType{
			tools.COMPUTE_RESOURCE,
			tools.STORAGE_RESOURCE,
			tools.PROCESSING_RESOURCE,
			tools.DATA_RESOURCE,
			tools.WORKFLOW_RESOURCE,
		}
	}
	var m map[string]string
	err := json.Unmarshal(event.Payload, &m)
	if err != nil {
	if self, err := oclib.GetMySelf(); err != nil {
		return err
	}
	for _, dt := range dts {
		access := oclib.NewRequestAdmin(oclib.LibDataEnum(event.DataType), nil)
		peerID := p.GetID()
		searched := access.Search(abs.FilterPeer(peerID, m["search"]), "", false)
		for _, ss := range searched.Data {
			if j, err := json.Marshal(ss); err == nil {
				if event.DataType != -1 {
					ndt := tools.DataType(dt.EnumIndex())
					abs.PublishResources(&ndt, event.User, peerID, j)
				} else {
					abs.PublishResources(nil, event.User, peerID, j)
	} else {
		for _, dt := range dts {
			access := oclib.NewRequestAdmin(oclib.LibDataEnum(dt), nil)
			searched := access.Search(abs.FilterPeer(self.GetID(), event.Groups, search), "", false, 0, 0)
			for _, ss := range searched.Data {
				if j, err := json.Marshal(ss); err == nil {
					abs.PublishCommon(&dt, event.User, event.Groups, p.PeerID, ProtocolSearchResource, j)
				}
			}
		}

552
daemons/node/stream/observe.go
Normal file
@@ -0,0 +1,552 @@
package stream

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
	"time"

	"oc-discovery/daemons/node/common"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/libp2p/go-libp2p/core/network"
	pp "github.com/libp2p/go-libp2p/core/peer"
)

// ProtocolObserve is the libp2p protocol for peer connectivity observation.
// The requesting oc-discovery opens a stream to the remote oc-discovery and
// sends an ObserveRequest. The remote side keeps the stream open and writes
// ObserveHeartbeat events back every observeHBInterval seconds.
const ProtocolObserve = "/opencloud/peer/observe/1.0"

// observeHBEventType is used as the common.Event.Type for heartbeat responses.
const observeHBEventType = "/opencloud/peer/observe/heartbeat"

const observeHBInterval = 30 * time.Second
const observeDrainDuration = 30 * time.Second

// observeBatchWindow is the accumulation window before a heartbeat batch is
// flushed to NATS. All peer heartbeats received within this window are grouped
// into a single PEER_OBSERVE_RESPONSE_EVENT, reducing NATS traffic.
const observeBatchWindow = 2 * time.Second

// ObserveRequest is the first (and only) message sent by the observing side
// when opening a ProtocolObserve stream.
type ObserveRequest struct {
	// Close, when true, asks the remote side to stop the heartbeat goroutine
	// and remove the observer from its cache. Used for graceful teardown.
	Close bool `json:"close,omitempty"`
}

// ObserveHeartbeat is sent by the observed side every observeHBInterval.
type ObserveHeartbeat struct {
	State string `json:"state"` // always "online" when actively emitted
}

// ShallowPeer is the minimal peer representation sent by oc-peer in a
// PEER_OBSERVE_EVENT. StreamAddress lets oc-discovery connect without a DB
// lookup; Address carries the NATSAddress (unused here, forwarded as-is).
type ShallowPeer struct {
	ID            string `json:"id"`
	PeerID        string `json:"peer_id"`
	Address       string `json:"address"`
	StreamAddress string `json:"stream_address"`
}

// ObserveCommand is the payload carried by a PEER_OBSERVE_EVENT NATS message
// (from oc-peer).
//
//	Observe  → User + Peers populated
//	Close    → User + PeerIDs + Close=true
//	CloseAll → CloseAll=true (User optional)
type ObserveCommand struct {
	User     string        `json:"user"`
	Peers    []ShallowPeer `json:"peers,omitempty"`
	PeerIDs  []string      `json:"peer_ids,omitempty"`
	Close    bool          `json:"close,omitempty"`
	CloseAll bool          `json:"close_all,omitempty"`
}
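Example commands as Go literals, matching the three shapes the doc comment lists (the IDs and addresses are placeholders):

observe := ObserveCommand{User: "u1", Peers: []ShallowPeer{{
	ID:            "did-123",
	PeerID:        "12D3KooExample",
	StreamAddress: "/ip4/10.0.0.2/tcp/4001/p2p/12D3KooExample",
}}}
closeOne := ObserveCommand{User: "u1", PeerIDs: []string{"12D3KooExample"}, Close: true}
closeAll := ObserveCommand{CloseAll: true}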

// ── observe cache (observed side) ────────────────────────────────────────────

// observeCache tracks running heartbeat goroutines keyed by the observing
// peer's libp2p PeerID string. It is used exclusively on the OBSERVED side.
type observeCache struct {
	mu      sync.Mutex
	cancels map[string]context.CancelFunc
}

func newObserveCache() *observeCache {
	return &observeCache{cancels: map[string]context.CancelFunc{}}
}

func (c *observeCache) set(pid string, cancel context.CancelFunc) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if old, ok := c.cancels[pid]; ok {
		old() // cancel previous goroutine if any
	}
	c.cancels[pid] = cancel
}

func (c *observeCache) cancel(pid string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if fn, ok := c.cancels[pid]; ok {
		fn()
		delete(c.cancels, pid)
	}
}

func (c *observeCache) cancelAll() {
	c.mu.Lock()
	defer c.mu.Unlock()
	for _, fn := range c.cancels {
		fn()
	}
	c.cancels = map[string]context.CancelFunc{}
}

func (c *observeCache) delete(pid string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	delete(c.cancels, pid)
}
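A lifecycle sketch for the cache, as used by the observe handler further down (variable names are illustrative):

ctx, cancel := context.WithCancel(context.Background())
cache.set(observerPID, cancel)  // replaces (and cancels) any previous loop
defer cache.delete(observerPID) // once the heartbeat goroutine returns
_ = ctx                         // ctx drives the per-observer heartbeat loop
// on an explicit Close command:
cache.cancel(observerPID)
// on CloseAll (followed by the drain window):
cache.cancelAll()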

// ── heartbeat batcher (observing side) ───────────────────────────────────────

// heartbeatBatcher accumulates peer_ids from incoming heartbeats over
// observeBatchWindow, then flushes them in a single NATS call.
// Using a map as the backing store deduplicates multiple heartbeats from the
// same peer within the same window (should not happen, but is harmless).
type heartbeatBatcher struct {
	mu    sync.Mutex
	ids   map[string]struct{}
	timer *time.Timer
	flush func(peerIDs []string)
}

func newHeartbeatBatcher(flush func([]string)) *heartbeatBatcher {
	return &heartbeatBatcher{
		ids:   make(map[string]struct{}),
		flush: flush,
	}
}

// add records peerID in the current batch and arms the flush timer if needed.
func (b *heartbeatBatcher) add(peerID string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.ids[peerID] = struct{}{}
	if b.timer == nil {
		b.timer = time.AfterFunc(observeBatchWindow, b.fire)
	}
}

// fire is called by the timer; it drains the batch and invokes flush.
func (b *heartbeatBatcher) fire() {
	b.mu.Lock()
	ids := make([]string, 0, len(b.ids))
	for id := range b.ids {
		ids = append(ids, id)
	}
	b.ids = make(map[string]struct{})
	b.timer = nil
	b.mu.Unlock()
	if len(ids) > 0 {
		b.flush(ids)
	}
}
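Wiring sketch: one batcher per observing service, flushing via flushObserveBatch defined just below:

b := newHeartbeatBatcher(flushObserveBatch)
b.add("peer-A") // arms the 2 s window
b.add("peer-B") // joins the same batch
// after observeBatchWindow, both IDs go out in a single NATS message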
|
||||
// flushObserveBatch is the flush function wired into the heartbeatBatcher.
|
||||
// It emits two NATS messages:
|
||||
// - PEER_OBSERVE_RESPONSE_EVENT → consumed by oc-peer (direct channel)
// - PROPALGATION_EVENT / PB_PROPAGATE → consumed by other oc-discovery nodes
func flushObserveBatch(peerIDs []string) {
	payload, err := json.Marshal(map[string]interface{}{
		"peer_ids": peerIDs,
		"state":    "online",
	})
	if err != nil {
		return
	}

	// Direct notification to oc-peer.
	tools.NewNATSCaller().SetNATSPub(tools.PEER_OBSERVE_RESPONSE_EVENT, tools.NATSResponse{
		FromApp:  "oc-discovery",
		Datatype: tools.PEER,
		Method:   int(tools.PEER_OBSERVE_RESPONSE_EVENT),
		Payload:  payload,
	})

	// Broadcast to other oc-discovery nodes so they can forward to their
	// local oc-peer if needed.
	propPayload, err := json.Marshal(tools.PropalgationMessage{
		DataType: int(tools.PEER),
		Action:   tools.PB_PROPAGATE,
		Payload:  payload,
	})
	if err != nil {
		return
	}
	tools.NewNATSCaller().SetNATSPub(tools.PROPALGATION_EVENT, tools.NATSResponse{
		FromApp:  "oc-discovery",
		Datatype: tools.PEER,
		Method:   int(tools.PROPALGATION_EVENT),
		Payload:  propPayload,
	})
}

// ── incoming observe handler (observed side) ──────────────────────────────────

// handleIncomingObserve is registered as the ProtocolObserve stream handler.
// It is called when a remote peer opens an observe stream to us.
// The function reads the request, validates it, then starts (or stops) the
// heartbeat goroutine and returns immediately — the goroutine owns the stream.
func (s *StreamService) handleIncomingObserve(rawStream network.Stream) error {
	remotePeerID := rawStream.Conn().RemotePeer().String()
	addr := rawStream.Conn().RemoteMultiaddr().String()
	ad, err := pp.AddrInfoFromString(addr + "/p2p/" + remotePeerID)
	if err != nil {
		fmt.Println("[observe] invalid remote address", addr, err)
		return err
	}
	log := oclib.GetLogger()

	// Drain mode: reject any new observations for 30 s after a close-all.
	s.drainMu.RLock()
	draining := !s.drainUntil.IsZero() && time.Now().Before(s.drainUntil)
	s.drainMu.RUnlock()
	if draining {
		rawStream.Close()
		fmt.Println("Draining")
		return errors.New("draining")
	}
	// Read the observe request (with a generous deadline to avoid hangs).
	// Guard: the requesting peer must not be blacklisted or be ourself.
	did := ""
	access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
	res := access.Search(&dbs.Filters{
		And: map[string][]dbs.Filter{
			"peer_id": {{Operator: dbs.EQUAL.String(), Value: remotePeerID}},
		},
	}, "", false, 0, 1)
	if len(res.Data) > 0 {
		p := res.Data[0].(*peer.Peer)
		did = p.GetID()
		if p.Relation == peer.BLACKLIST { // || p.Relation == peer.SELF
			rawStream.Close()
			fmt.Println("CLOSE blacklist or self")
			return errors.New("can't observe: blacklisted or self")
		}
	}

	// Replace any existing heartbeat goroutine for this observer.
	ctx, cancel := context.WithCancel(context.Background())
	s.observeCache.set(remotePeerID, cancel)
	fmt.Println("LOOP OBSERVE")
	go func() {
		defer rawStream.Close()
		defer cancel()
		defer s.observeCache.delete(remotePeerID)

		ticker := time.NewTicker(observeHBInterval)
		defer ticker.Stop()

		hbPayload, _ := json.Marshal(ObserveHeartbeat{State: "online"})
		evt := common.NewEvent(observeHBEventType, s.Host.ID().String(), nil, "", hbPayload)
		if evt == nil {
			return
		}
		// Send one heartbeat immediately so the observer learns our state
		// without waiting for the first tick.
		if s.Streams, err = common.TempStream(s.Host, *ad, ProtocolObserve, did, s.Streams, protocols, &s.Mu); err == nil {
			stream := s.Streams[ProtocolObserve][ad.ID]
			if err := json.NewEncoder(stream.Stream).Encode(evt); err != nil {
				// Moderate connectivity event: the observer is unreachable.
				// The deferred calls above purge this observer from the cache.
				fmt.Println("LOOP EVT ERR", err)
				log.Info().
					Str("observer", remotePeerID).
					Err(err).
					Msg("[observe] heartbeat write failed — moderate connectivity event, purging observer from cache")
				return
			}
		}
		for {
			select {
			case <-ctx.Done():
				return
			case <-ticker.C:
				rawStream.SetWriteDeadline(time.Now().Add(5 * time.Second))
				fmt.Println("LOOP EVT", evt)
				var err error
				if s.Streams, err = common.TempStream(s.Host, *ad, ProtocolObserve, did, s.Streams, protocols, &s.Mu); err == nil {
					stream := s.Streams[ProtocolObserve][ad.ID]
					if err := json.NewEncoder(stream.Stream).Encode(evt); err != nil {
						// Moderate connectivity event: the observer is unreachable.
						// The deferred calls above purge this observer from the cache.
						fmt.Println("LOOP EVT ERR", err)
						log.Info().
							Str("observer", remotePeerID).
							Err(err).
							Msg("[observe] heartbeat write failed — moderate connectivity event, purging observer from cache")
						return
					}
				}
				rawStream.SetWriteDeadline(time.Time{})
			}
		}
	}()
	return nil
}

// ── heartbeat receiver (observing side) ───────────────────────────────────────

// handleObserveHeartbeat is called by readLoop when a heartbeat event arrives
// on an outgoing ProtocolObserve stream. It queues the peer_id in the batch
// accumulator; the batcher flushes to NATS after observeBatchWindow.
func (ps *StreamService) handleObserveHeartbeat(evt *common.Event) error {
	// ps.hbBatcher.add(evt.From)
	flushObserveBatch([]string{evt.From})
	return nil
}
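
// Editor's sketch (not part of this changeset): the batcher referenced above
// is constructed later in this diff as newHeartbeatBatcher(flushObserveBatch),
// but its body is not shown in this hunk. The accumulate-then-flush behavior
// it describes could look like the following; the field names and timer logic
// are assumptions for illustration only, not the repository's code.
type heartbeatBatcher struct {
	mu    sync.Mutex
	batch map[string]struct{}
	flush func(peerIDs []string)
}

func newHeartbeatBatcher(flush func([]string)) *heartbeatBatcher {
	return &heartbeatBatcher{batch: map[string]struct{}{}, flush: flush}
}

// add queues one peer_id; the first add of a window arms a one-shot timer
// that flushes the whole accumulated batch after observeBatchWindow.
func (b *heartbeatBatcher) add(peerID string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	first := len(b.batch) == 0
	b.batch[peerID] = struct{}{}
	if !first {
		return // a flush is already scheduled for this window
	}
	time.AfterFunc(observeBatchWindow, func() {
		b.mu.Lock()
		ids := make([]string, 0, len(b.batch))
		for id := range b.batch {
			ids = append(ids, id)
		}
		b.batch = map[string]struct{}{}
		b.mu.Unlock()
		b.flush(ids)
	})
}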

// ── user→peer index (ref-counted observe management) ─────────────────────────

// userPeerIndex tracks which users are observing which peers.
// A libp2p observe stream is kept open as long as at least one user watches
// the peer; it is closed only when the last user stops.
type userPeerIndex struct {
	mu    sync.Mutex
	index map[string]map[string]struct{} // user → set of peer_id strings
}

func newUserPeerIndex() *userPeerIndex {
	return &userPeerIndex{index: map[string]map[string]struct{}{}}
}

// add registers user as an observer of peerID.
// Returns true if peerID was not yet observed by any user (first observer).
func (u *userPeerIndex) add(user, peerID string) (isFirst bool) {
	u.mu.Lock()
	defer u.mu.Unlock()
	// Count total observers for peerID across all users before adding.
	total := 0
	for _, peers := range u.index {
		if _, ok := peers[peerID]; ok {
			total++
		}
	}
	if u.index[user] == nil {
		u.index[user] = map[string]struct{}{}
	}
	u.index[user][peerID] = struct{}{}
	return total == 0
}

// remove unregisters user from peerID.
// Returns true if no user is observing peerID anymore (last observer removed).
func (u *userPeerIndex) remove(user, peerID string) (isLast bool) {
	u.mu.Lock()
	defer u.mu.Unlock()
	delete(u.index[user], peerID)
	if len(u.index[user]) == 0 {
		delete(u.index, user)
	}
	for _, peers := range u.index {
		if _, ok := peers[peerID]; ok {
			return false
		}
	}
	return true
}

// removeUser removes all entries for user and returns the peer_ids that now
// have no remaining observers (i.e., those whose streams should be closed).
func (u *userPeerIndex) removeUser(user string) []string {
	u.mu.Lock()
	defer u.mu.Unlock()
	watched := u.index[user]
	delete(u.index, user)
	var orphans []string
	for peerID := range watched {
		found := false
		for _, peers := range u.index {
			if _, ok := peers[peerID]; ok {
				found = true
				break
			}
		}
		if !found {
			orphans = append(orphans, peerID)
		}
	}
	return orphans
}
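
// Editor's note: the ref-count contract above in one glance (illustrative only):
//
//	idx := newUserPeerIndex()
//	idx.add("alice", "peerA")    // → true:  first observer, open the stream
//	idx.add("bob", "peerA")      // → false: stream already open, just counted
//	idx.remove("alice", "peerA") // → false: bob still watches peerA
//	idx.remove("bob", "peerA")   // → true:  last observer gone, close the stream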

// ── NATS command handler (observing side) ─────────────────────────────────────

// HandleObserveNATSCommand processes a PEER_OBSERVE_EVENT received from oc-peer.
func (ps *StreamService) HandleObserveNATSCommand(resp tools.NATSResponse) {
	log := oclib.GetLogger()
	var cmd ObserveCommand
	if err := json.Unmarshal(resp.Payload, &cmd); err != nil {
		log.Warn().Err(err).Msg("[observe] failed to unmarshal ObserveCommand")
		return
	}
	if cmd.CloseAll {
		log.Info().Msg("[observe] close-all received via NATS")
		ps.CloseAllObserves()
		return
	}
	if cmd.Close {
		for _, peerID := range cmd.PeerIDs {
			if isLast := ps.observeUsers.remove(cmd.User, peerID); isLast {
				if err := ps.closeObserveStream(peerID); err != nil {
					log.Warn().Str("peer", peerID).Err(err).Msg("[observe] closeObserveStream failed")
				}
			}
		}
		return
	}
	// Observe: open streams for any new peer, using the address from the payload.
	for _, p := range cmd.Peers {
		if isFirst := ps.observeUsers.add(cmd.User, p.PeerID); isFirst {
			if err := ps.openObserveStream(p); err != nil {
				// Roll back the index entry so the next NATS command can retry.
				ps.observeUsers.remove(cmd.User, p.PeerID)
				log.Warn().Str("peer", p.PeerID).Err(err).Msg("[observe] openObserveStream failed")
			}
		}
	}
}

// ── outgoing observe management (observing side) ──────────────────────────────

// OpenObserveStream is the exported variant for inter-discovery propagation
// (no user context available). It bypasses the user index and opens the stream
// directly if not already open.
func (ps *StreamService) OpenObserveStream(p ShallowPeer) error {
	return ps.openObserveStream(p)
}

// CloseObserveStream is the exported variant for inter-discovery propagation.
func (ps *StreamService) CloseObserveStream(toPeerID string) error {
	return ps.closeObserveStream(toPeerID)
}

// openObserveStream opens a ProtocolObserve stream to p.
// Uses p.StreamAddress directly; falls back to DB then DHT lookup if empty.
func (ps *StreamService) openObserveStream(p ShallowPeer) error {
	streamAddr := p.StreamAddress
	fmt.Println("STREAM OBS", streamAddr)
	access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
	res := access.Search(&dbs.Filters{
		And: map[string][]dbs.Filter{
			"peer_id": {{Operator: dbs.EQUAL.String(), Value: p.PeerID}},
		},
	}, "", false, 0, 1)
	if streamAddr == "" {
		// Fallback: DB then DHT.
		if len(res.Data) > 0 {
			streamAddr = res.Data[0].(*peer.Peer).StreamAddress
		} else if peers, err := ps.Node.GetPeerRecord(context.Background(), p.PeerID); err == nil && len(peers) > 0 {
			streamAddr = peers[0].StreamAddress
		}
	}
	if len(res.Data) > 0 && res.Data[0].(*peer.Peer).Relation == peer.SELF {
		return errors.New("can't send to self")
	}
	fmt.Println("STREAM OBS SSS", streamAddr)

	if streamAddr == "" {
		return nil // can't resolve address — silently skip
	}

	decodedID, err := pp.Decode(p.PeerID)
	if err != nil {
		return err
	}

	// If a stream already exists, reuse it.
	ps.Mu.RLock()
	_, alreadyOpen := ps.Streams[ProtocolObserve][decodedID]
	ps.Mu.RUnlock()
	if alreadyOpen {
		return nil
	}
	ad, err := pp.AddrInfoFromString(streamAddr)
	if err != nil {
		return err
	}
	fmt.Println("TempStream OBSERVE", ad)
	if ps.Streams, err = common.TempStream(ps.Host, *ad, ProtocolObserve, p.ID, ps.Streams, protocols, &ps.Mu); err == nil {
		rawStream := ps.Streams[ProtocolObserve][ad.ID]
		if hbPayload, err := json.Marshal(ObserveRequest{Close: false}); err == nil {
			if err := json.NewEncoder(rawStream.Stream).Encode(common.NewEvent(ProtocolObserve, ps.Host.ID().String(), nil, "", hbPayload)); err != nil {
				fmt.Println("[observe] initial observe request write failed", err)
				rawStream.Stream.Close()
				return err
			}
			s := &common.Stream{
				Stream: rawStream.Stream,
				Expiry: time.Now().Add(365 * 24 * time.Hour),
			}
			ps.Mu.Lock()
			if ps.Streams[ProtocolObserve] == nil {
				ps.Streams[ProtocolObserve] = map[pp.ID]*common.Stream{}
			}
			ps.Streams[ProtocolObserve][ad.ID] = s
			ps.Mu.Unlock()

			go ps.readLoop(s, ad.ID, ProtocolObserve, &common.ProtocolInfo{PersistantStream: true})
		}

	} else {
		return err
	}
	return nil
}

// closeObserveStream closes the ProtocolObserve stream to toPeerID and notifies
// the remote side.
func (ps *StreamService) closeObserveStream(toPeerID string) error {
	decodedID, err := pp.Decode(toPeerID)
	if err != nil {
		return err
	}
	ps.Mu.Lock()
	if ps.Streams[ProtocolObserve] != nil {
		if s, ok := ps.Streams[ProtocolObserve][decodedID]; ok {
			_ = json.NewEncoder(s.Stream).Encode(ObserveRequest{Close: true})
			s.Stream.Close()
			delete(ps.Streams[ProtocolObserve], decodedID)
		}
	}
	ps.Mu.Unlock()
	return nil
}

// CloseAllObserves closes every outgoing ProtocolObserve stream, clears the
// user index, and enters drain mode for observeDrainDuration.
func (ps *StreamService) CloseAllObserves() {
	ps.Mu.Lock()
	for _, s := range ps.Streams[ProtocolObserve] {
		_ = json.NewEncoder(s.Stream).Encode(ObserveRequest{Close: true})
		s.Stream.Close()
	}
	delete(ps.Streams, ProtocolObserve)
	ps.Mu.Unlock()

	// Reset user index so stale ref-counts don't block future opens.
	ps.observeUsers = newUserPeerIndex()

	ps.drainMu.Lock()
	ps.drainUntil = time.Now().Add(observeDrainDuration)
	ps.drainMu.Unlock()
}
@@ -6,183 +6,171 @@ import (
	"errors"
	"fmt"
	"oc-discovery/daemons/node/common"
	"strings"
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/tools"
	"github.com/libp2p/go-libp2p/core/network"
	pp "github.com/libp2p/go-libp2p/core/peer"
	"github.com/libp2p/go-libp2p/core/protocol"
)

func (ps *StreamService) PublishResources(dt *tools.DataType, user string, toPeerID string, resource []byte) error {
func (ps *StreamService) PublishesCommon(dt *tools.DataType, user string, groups []string, filter *dbs.Filters, resource []byte, protos ...protocol.ID) error {
	access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
	p := access.LoadOne(toPeerID)
	if p.Err != "" {
		return errors.New(p.Err)
	var p oclib.LibDataShallow
	if filter == nil {
		p = access.LoadAll(false, 0, 10000)
	} else {
		ad, err := pp.AddrInfoFromString(p.Data.(*peer.Peer).StreamAddress)
		if err != nil {
			return err
		p = access.Search(filter, "", false, 0, 10000)
	}
	for _, pes := range p.Data {
		for _, proto := range protos {
			if _, err := ps.PublishCommon(dt, user, groups, pes.(*peer.Peer).PeerID, proto, resource); err != nil {
				continue
			}
		}
		ps.write(tools.PB_SEARCH, toPeerID, ad, dt, user, resource, ProtocolSearchResource, p.Data.(*peer.Peer).Relation == peer.PARTNER)
	}
	return nil
}

func (ps *StreamService) SearchKnownPublishEvent(dt *tools.DataType, user string, search string) error {
func (ps *StreamService) PublishCommon(dt *tools.DataType, user string, groups []string, toPeerID string, proto protocol.ID, resource []byte) (*common.Stream, error) {
	fmt.Println("PublishCommon", toPeerID)
	if toPeerID == ps.Key.String() {
		fmt.Println("Can't send to ourself!")
		return nil, errors.New("can't send to ourself")
	}

	access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
	peers := access.Search(&dbs.Filters{ // exclude blacklisted peers from the broadcast
		And: map[string][]dbs.Filter{
			"": {{Operator: dbs.NOT.String(), Value: dbs.Filters{ // negate the blacklist relation match
				And: map[string][]dbs.Filter{
					"relation": {{Operator: dbs.EQUAL.String(), Value: peer.BLACKLIST}},
				},
			}}},
	p := access.Search(&dbs.Filters{
		And: map[string][]dbs.Filter{ // search by name if no filters are provided
			"peer_id": {{Operator: dbs.EQUAL.String(), Value: toPeerID}},
		},
	}, search, false)
	if peers.Err != "" {
		return errors.New(peers.Err)
	} else {
		b, err := json.Marshal(map[string]string{"search": search})
		if err != nil {
			return err
		}
		for _, p := range peers.Data {
			ad, err := pp.AddrInfoFromString(p.(*peer.Peer).StreamAddress)
			if err != nil {
				continue
			}
			ps.write(tools.PB_SEARCH, p.GetID(), ad, dt, user, b, ProtocolSearchResource, p.(*peer.Peer).Relation == peer.PARTNER)
		}
	}, toPeerID, false, 0, 1)
	var pe *peer.Peer
	if len(p.Data) > 0 && p.Data[0].(*peer.Peer).Relation != peer.BLACKLIST {
		pe = p.Data[0].(*peer.Peer)
	} else if pps, err := ps.Node.GetPeerRecord(context.Background(), toPeerID); err == nil && len(pps) > 0 {
		pe = pps[0]
	}
	return nil
}

func (ps *StreamService) SearchPartnersPublishEvent(dt *tools.DataType, user string, search string) error {
	if peers, err := ps.searchPeer(fmt.Sprintf("%v", peer.PARTNER.EnumIndex())); err != nil {
		return err
	} else {
		b, err := json.Marshal(map[string]string{"search": search})
	if pe != nil {
		ad, err := pp.AddrInfoFromString(pe.StreamAddress)
		if err != nil {
			return err
			return nil, err
		}
		for _, p := range peers {
			ad, err := pp.AddrInfoFromString(p.StreamAddress)
			if err != nil {
				continue
		stream, err := ps.write(toPeerID, ad, dt, user, resource, proto)
		if err != nil {
			if _, ok := dntProtocols[proto]; ok {
				ps.dnt.enqueue(&dntEntry{
					did:     toPeerID,
					addr:    *ad,
					dt:      dt,
					user:    user,
					payload: resource,
					proto:   proto,
					addedAt: time.Now().UTC(),
				})
			}
			ps.write(tools.PB_SEARCH, p.GetID(), ad, dt, user, b, ProtocolSearchResource, true)
			return nil, err
		}
		return stream, nil
	}
	return nil
	return nil, errors.New("invalid peer " + toPeerID)
}
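
// Editor's sketch (not part of this changeset): the dnt cache used above
// (dntCache, dntEntry, dntProtocols) is defined elsewhere; this only
// illustrates the store-and-forward retry idea behind the enqueue call. The
// repository's own startDNTLoop is launched in InitStream below; the drain()
// accessor and the 24 h expiry here are invented for the sketch.
func (ps *StreamService) retryDNTOnce() {
	for _, e := range ps.dnt.drain() { // drain(): hypothetical accessor
		if time.Since(e.addedAt) > 24*time.Hour {
			continue // entry too old: drop it instead of retrying forever
		}
		// Retry the exact write that failed; requeue on further failure.
		if _, err := ps.write(e.did, &e.addr, e.dt, e.user, e.payload, e.proto); err != nil {
			ps.dnt.enqueue(e)
		}
	}
}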

func (ps *StreamService) ToPartnerPublishEvent(
	ctx context.Context, action tools.PubSubAction, dt *tools.DataType, user string, payload []byte) error {
	ctx context.Context, action tools.PubSubAction, dt *tools.DataType, user string, groups []string, payload []byte) error {
	var proto protocol.ID
	proto = ProtocolCreateResource
	switch action {
	case tools.PB_DELETE:
		proto = ProtocolDeleteResource
	case tools.PB_UPDATE:
		proto = ProtocolUpdateResource
	}
	if *dt == tools.PEER {
		var p peer.Peer
		if err := json.Unmarshal(payload, &p); err != nil {
			return err
		}
		ad, err := pp.AddrInfoFromString(p.StreamAddress)
		if err != nil {
		if _, err := pp.Decode(p.PeerID); err != nil {
			return err
		}
		ps.mu.Lock()
		defer ps.mu.Unlock()
		if p.Relation == peer.PARTNER {
			if ps.Streams[ProtocolHeartbeatPartner] == nil {
				ps.Streams[ProtocolHeartbeatPartner] = map[pp.ID]*common.Stream{}
			}
			ps.ConnectToPartner(ad.ID, ad)
		} else if ps.Streams[ProtocolHeartbeatPartner] != nil && ps.Streams[ProtocolHeartbeatPartner][ad.ID] != nil {
			for _, pids := range ps.Streams {
				if pids[ad.ID] != nil {
					delete(pids, ad.ID)

		if pe, err := oclib.GetMySelf(); err != nil {
			return err
		} else if pe.GetID() == p.GetID() {
			return fmt.Errorf("can't send to ourself")
		} else {
			pe.Relation = p.Relation
			pe.Verify = false
			if b2, err := json.Marshal(pe); err == nil {
				if _, err := ps.PublishCommon(dt, user, groups, p.PeerID, ProtocolUpdateResource, b2); err != nil {
					return err
				}

			}
		}
		var per peer.Peer
		if err := json.Unmarshal(payload, &per); err == nil && !strings.Contains(per.Relation.String(), "master") && !strings.Contains(per.Relation.String(), "nano") {
			for _, rel := range []peer.PeerRelation{peer.MASTER, peer.NANO} {
				ps.PublishesCommon(dt, user, groups, &dbs.Filters{
					And: map[string][]dbs.Filter{
						"relation": {{Operator: dbs.EQUAL.String(), Value: rel}},
					},
				}, payload, proto)
			}
		}

		return nil
	}
	if peers, err := ps.searchPeer(fmt.Sprintf("%v", peer.PARTNER.EnumIndex())); err != nil {
		return err
	} else {
		for _, p := range peers {
			for _, protocol := range protocols {
				ad, err := pp.AddrInfoFromString(p.StreamAddress)
				if err != nil {
					continue
				}
				ps.write(action, p.GetID(), ad, dt, user, payload, protocol, true)
			}
		}
	ks := []protocol.ID{}
	for k := range protocolsPartners {
		ks = append(ks, k)
	}
	for _, rel := range []peer.PeerRelation{peer.PARTNER, peer.MASTER, peer.NANO} {
		ps.PublishesCommon(dt, user, groups, &dbs.Filters{
			And: map[string][]dbs.Filter{
				"relation": {{Operator: dbs.EQUAL.String(), Value: rel}},
			},
		}, payload, proto)
	}
	return nil
}

func (s *StreamService) write(
	action tools.PubSubAction,
	did string,
	peerID *pp.AddrInfo,
	dt *tools.DataType,
	user string,
	payload []byte,
	proto protocol.ID,
	isAPartner bool) error {
	proto protocol.ID) (*common.Stream, error) {
	logger := oclib.GetLogger()

	name := action.String() + "#" + peerID.ID.String()
	if dt != nil {
		name = action.String() + "." + (*dt).String() + "#" + peerID.ID.String()
	var err error
	pts := map[protocol.ID]*common.ProtocolInfo{}
	for k, v := range protocols {
		pts[k] = v
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.Streams[proto] == nil {
		s.Streams[proto] = map[pp.ID]*common.Stream{}
	for k, v := range protocolsPartners {
		pts[k] = v
	}
	// create a short-lived temporary stream if none is cached
	if s.Streams, err = common.TempStream(s.Host, *peerID, proto, did, s.Streams, pts, &s.Mu); err != nil {
		fmt.Println("TempStream", err)
		return nil, errors.New("no stream available for protocol " + fmt.Sprintf("%v", proto) + " from PID " + peerID.ID.String())
	}

	if s.Streams[proto][peerID.ID] == nil {
		// create a short-lived temporary stream
		ctxTTL, err := context.WithTimeout(context.Background(), 60*time.Second)
		if err == nil {
			if isAPartner {
				ctxTTL = context.Background()
			}
			if s.Host.Network().Connectedness(peerID.ID) != network.Connected {
				_ = s.Host.Connect(ctxTTL, *peerID)
			str, err := s.Host.NewStream(ctxTTL, peerID.ID, ProtocolHeartbeatPartner)
			if err == nil {
				s.Streams[ProtocolHeartbeatPartner][peerID.ID] = &common.Stream{
					DID:    did,
					Stream: str,
					Expiry: time.Now().UTC().Add(5 * time.Second),
				}
				str2, err := s.Host.NewStream(ctxTTL, peerID.ID, proto)
				if err == nil {
					s.Streams[proto][peerID.ID] = &common.Stream{
						DID:    did,
						Stream: str2,
						Expiry: time.Now().UTC().Add(5 * time.Second),
					}
				}
			}

		}
	}
	return errors.New("no stream available for protocol " + fmt.Sprintf("%v", proto) + " from PID " + peerID.ID.String())
	}
	stream := s.Streams[proto][peerID.ID]

	enc := json.NewEncoder(stream.Stream)

	evt := common.NewEvent(name, peerID.ID.String(), dt, user, payload)
	if err := enc.Encode(evt); err != nil {
	evt := common.NewEvent(string(proto), s.Host.ID().String(), dt, user, payload)
	fmt.Println("SEND EVENT ", peerID, proto, evt.From, evt.DataType, evt.Timestamp)
	if err := json.NewEncoder(stream.Stream).Encode(evt); err != nil {
		stream.Stream.Close()
		logger.Err(err)
		return nil
		return nil, err
	}
	return nil
	if protocolInfo, ok := protocols[proto]; ok && protocolInfo.WaitResponse {
		go s.readLoop(stream, peerID.ID, proto, &common.ProtocolInfo{PersistantStream: true})
	}
	return stream, nil
}

@@ -3,7 +3,8 @@ package stream
import (
	"context"
	"encoding/json"
	"fmt"
	"errors"
	"io"
	"oc-discovery/conf"
	"oc-discovery/daemons/node/common"
	"strings"
@@ -11,6 +12,7 @@ import (
	"time"

	oclib "cloud.o-forge.io/core/oc-lib"
	"cloud.o-forge.io/core/oc-lib/config"
	"cloud.o-forge.io/core/oc-lib/dbs"
	"cloud.o-forge.io/core/oc-lib/models/peer"
	"cloud.o-forge.io/core/oc-lib/models/utils"
@@ -21,154 +23,203 @@ import (
	"github.com/libp2p/go-libp2p/core/protocol"
)

const ProtocolConsidersResource = "/opencloud/resource/considers/1.0"
const ProtocolMinioConfigResource = "/opencloud/minio/config/1.0"
const ProtocolAdmiraltyConfigResource = "/opencloud/admiralty/config/1.0"

const ProtocolSearchResource = "/opencloud/resource/search/1.0"
const ProtocolCreateResource = "/opencloud/resource/create/1.0"
const ProtocolUpdateResource = "/opencloud/resource/update/1.0"
const ProtocolDeleteResource = "/opencloud/resource/delete/1.0"

const ProtocolSendPlanner = "/opencloud/resource/planner/1.0"
const ProtocolVerifyResource = "/opencloud/resource/verify/1.0"
const ProtocolHeartbeatPartner = "/opencloud/resource/heartbeat/partner/1.0"

var protocols = []protocol.ID{
	ProtocolSearchResource,
	ProtocolCreateResource,
	ProtocolUpdateResource,
	ProtocolDeleteResource,
var protocols = map[protocol.ID]*common.ProtocolInfo{
	ProtocolConsidersResource:       {WaitResponse: false, TTL: 3 * time.Second},
	ProtocolSendPlanner:             {WaitResponse: true, TTL: 24 * time.Hour},
	ProtocolSearchResource:          {WaitResponse: true, TTL: 1 * time.Minute},
	ProtocolVerifyResource:          {WaitResponse: true, TTL: 1 * time.Minute},
	ProtocolMinioConfigResource:     {WaitResponse: true, TTL: 1 * time.Minute},
	ProtocolAdmiraltyConfigResource: {WaitResponse: true, TTL: 1 * time.Minute},
	ProtocolObserve:                 {WaitResponse: true, TTL: 1 * time.Minute},
}

var protocolsPartners = map[protocol.ID]*common.ProtocolInfo{
	ProtocolCreateResource: {TTL: 3 * time.Second},
	ProtocolUpdateResource: {TTL: 3 * time.Second},
	ProtocolDeleteResource: {TTL: 3 * time.Second},
}

type StreamService struct {
	Key          pp.ID
	Host         host.Host
	Node         common.DiscoveryPeer
	Streams      common.ProtocolStream
	maxNodesConn int
	mu           sync.Mutex
	// Stream map[protocol.ID]map[pp.ID]*daemons.Stream
	Key              pp.ID
	Host             host.Host
	Node             common.DiscoveryPeer
	Streams          common.ProtocolStream
	maxNodesConn     int
	Mu               sync.RWMutex
	ResourceSearches *common.SearchTracker
	// IsPeerKnown, when set, is called at stream open for every inbound protocol.
	// Return false to reset the stream immediately. Left nil until wired by the node.
	IsPeerKnown func(pid pp.ID) bool
	// dnt is the Disconnection Network Tolerance cache for outbound streams.
	dnt *dntCache
	// observeCache tracks running heartbeat goroutines on the OBSERVED side.
	observeCache *observeCache
	// hbBatcher accumulates incoming heartbeats (observing side) and flushes
	// them as a single NATS batch after observeBatchWindow.
	hbBatcher *heartbeatBatcher
	// drainUntil / drainMu implement the startup drain window: for 30 s after a
	// close-all, incoming ProtocolObserve requests are rejected so stale heartbeats
	// from a previous run cannot mix with fresh observations.
	drainUntil time.Time
	drainMu    sync.RWMutex
	// observeUsers tracks which users are observing which peers so streams are
	// closed only when the last observer for a peer disconnects.
	observeUsers *userPeerIndex
}

func InitStream(ctx context.Context, h host.Host, key pp.ID, maxNode int, node common.DiscoveryPeer) (*StreamService, error) {
	logger := oclib.GetLogger()
	service := &StreamService{
		Key:          key,
		Node:         node,
		Host:         h,
		Streams:      common.ProtocolStream{},
		maxNodesConn: maxNode,
		Key:              key,
		Node:             node,
		Host:             h,
		Streams:          common.ProtocolStream{},
		maxNodesConn:     maxNode,
		ResourceSearches: common.NewSearchTracker(),
		dnt:              newDNTCache(),
		observeCache:     newObserveCache(),
		observeUsers:     newUserPeerIndex(),
	}
	logger.Info().Msg("handle to partner heartbeat protocol...")
	service.Host.SetStreamHandler(ProtocolHeartbeatPartner, service.HandlePartnerHeartbeat)
	service.hbBatcher = newHeartbeatBatcher(flushObserveBatch)
	for proto := range protocols {
		service.Host.SetStreamHandler(proto, service.gate(service.HandleResponse))
	}
	// ProtocolObserve uses a dedicated handler (bidirectional, long-lived).
	logger.Info().Msg("connect to partners...")
	service.connectToPartners() // we set up a stream
	go service.StartGC(30 * time.Second)
	go service.StartGC(8 * time.Second)
	go service.startDNTLoop()
	return service, nil
}

func (s *StreamService) HandlePartnerHeartbeat(stream network.Stream) {
	pid, hb, err := common.CheckHeartbeat(s.Host, stream, s.maxNodesConn)
	if err != nil {
// gatePrivilege wraps a stream handler with an additional privilege check:
// on a nano node, inbound partner-protocol streams are accepted only if a
// MASTER peer is known locally; otherwise the stream is silently dropped.
// It then applies the same IsPeerKnown validation as gate.
func (s *StreamService) gatePrivilege(h func(network.Stream)) func(network.Stream) {
	return func(stream network.Stream) {
		if config.GetConfig().IsNano {
			d := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil).Search(&dbs.Filters{
				And: map[string][]dbs.Filter{
					"relation": {{Operator: dbs.EQUAL.String(), Value: peer.MASTER}},
				},
			}, "", false, 0, 1)
			if len(d.Data) == 0 {
				return
			}
		}
		s.knowingGate(stream, h)
	}
}

// gate wraps a stream handler with IsPeerKnown validation.
// If the peer is unknown the entire connection is closed and the handler is not called.
// IsPeerKnown is read at stream-open time so it works even when set after InitStream.
func (s *StreamService) gate(h func(network.Stream)) func(network.Stream) {
	return func(stream network.Stream) {
		s.knowingGate(stream, h)
	}
}

func (s *StreamService) knowingGate(stream network.Stream, h func(network.Stream)) {
	if s.IsPeerKnown != nil && !s.IsPeerKnown(stream.Conn().RemotePeer()) {
		logger := oclib.GetLogger()
		logger.Warn().Str("peer", stream.Conn().RemotePeer().String()).Msg("[stream] unknown peer, closing connection")
		stream.Conn().Close()
		return
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	h(stream)
}

	if s.Streams[ProtocolHeartbeatPartner] == nil {
		s.Streams[ProtocolHeartbeatPartner] = map[pp.ID]*common.Stream{}
func (s *StreamService) HandleResponse(stream network.Stream) {
	s.Mu.Lock()
	defer s.Mu.Unlock()
	if s.Streams[stream.Protocol()] == nil {
		s.Streams[stream.Protocol()] = map[pp.ID]*common.Stream{}
	}
	streams := s.Streams[ProtocolHeartbeatPartner]
	// if the record was already seen, update its last-seen expiry
	if rec, ok := streams[*pid]; ok {
		rec.DID = hb.DID
		rec.Expiry = time.Now().UTC().Add(2 * time.Minute)
	} else { // not yet in the stream map
	pid := stream.Conn().RemotePeer()
	ai, err := pp.AddrInfoFromP2pAddr(stream.Conn().RemoteMultiaddr())
	if err == nil {
		s.ConnectToPartner(pid, ai)
	}
	expiry := 1 * time.Minute

	if protocols[stream.Protocol()] != nil {
		expiry = protocols[stream.Protocol()].TTL
	} else if protocolsPartners[stream.Protocol()] != nil {
		expiry = protocolsPartners[stream.Protocol()].TTL
	}
	go s.StartGC(30 * time.Second)

	s.Streams[stream.Protocol()][stream.Conn().RemotePeer()] = &common.Stream{
		Stream: stream,
		Expiry: time.Now().UTC().Add(expiry + 1*time.Minute),
	}
	go s.readLoop(s.Streams[stream.Protocol()][stream.Conn().RemotePeer()],
		stream.Conn().RemotePeer(),
		stream.Protocol(), protocols[stream.Protocol()])
}

func (s *StreamService) connectToPartners() error {
	peers, err := s.searchPeer(fmt.Sprintf("%v", peer.PARTNER.EnumIndex()))
	if err != nil {
		return err
	}
	for _, p := range peers {
		ad, err := pp.AddrInfoFromString(p.StreamAddress)
		if err != nil {
			continue
		}
		pid, err := pp.Decode(p.PeerID)
		if err != nil {
			continue
		}
		s.ConnectToPartner(pid, ad)
		// heartbeat your partner.
	}
	for _, proto := range protocols {
	logger := oclib.GetLogger()
	// Register handlers for partner resource protocols (create/update/delete).
	// Connections to partners happen on-demand via TempStream when needed.
	for proto, info := range protocolsPartners {
		f := func(ss network.Stream) {
			if s.Streams[proto] == nil {
				s.Streams[proto] = map[pp.ID]*common.Stream{}
			}
			s.Streams[proto][ss.Conn().RemotePeer()] = &common.Stream{
				Stream: ss,
				Expiry: time.Now().UTC().Add(2 * time.Minute),
				Expiry: time.Now().UTC().Add(10 * time.Second),
			}
			s.readLoop(s.Streams[proto][ss.Conn().RemotePeer()])
			go s.readLoop(s.Streams[proto][ss.Conn().RemotePeer()], ss.Conn().RemotePeer(), proto, info)
		}
		fmt.Println("SetStreamHandler", proto)
		s.Host.SetStreamHandler(proto, f)
		logger.Info().Msg("SetStreamHandler " + string(proto))
		s.Host.SetStreamHandler(proto, s.gatePrivilege(f))
	}
	// TODO if handle... from partner then HeartBeat back
	return nil
}

func (s *StreamService) ConnectToPartner(pid pp.ID, ad *pp.AddrInfo) {
	logger := oclib.GetLogger()
	for _, proto := range protocols {
		f := func(ss network.Stream) {
			if s.Streams[proto] == nil {
				s.Streams[proto] = map[pp.ID]*common.Stream{}
			}
			s.Streams[proto][pid] = &common.Stream{
				Stream: ss,
				Expiry: time.Now().UTC().Add(2 * time.Minute),
			}
			s.readLoop(s.Streams[proto][pid])
		}
		if s.Host.Network().Connectedness(ad.ID) != network.Connected {
			if err := s.Host.Connect(context.Background(), *ad); err != nil {
				logger.Err(err)
				continue
			}
		}
		s.Streams = common.AddStreamProtocol(nil, s.Streams, s.Host, proto, pid, s.Key, false, &f)
	}
	common.SendHeartbeat(context.Background(), ProtocolHeartbeatPartner, conf.GetConfig().Name,
		s.Host, s.Streams, []*pp.AddrInfo{ad}, 20*time.Second)
}

func (s *StreamService) searchPeer(search string) ([]*peer.Peer, error) {
	/* TODO: for tests only, addresses come from config vars... deserialize them */
	ps := []*peer.Peer{}
	if conf.GetConfig().PeerIDS != "" {
		for _, peerID := range strings.Split(conf.GetConfig().PeerIDS, ",") {
			ppID := strings.Split(peerID, ":")
	if conf.GetConfig().NanoIDS != "" {
		for _, peerID := range strings.Split(conf.GetConfig().NanoIDS, ",") {
			ppID := strings.Split(peerID, "/")
			ps = append(ps, &peer.Peer{
				AbstractObject: utils.AbstractObject{
					UUID: uuid.New().String(),
					Name: ppID[1],
				},
				PeerID:        ppID[1],
				StreamAddress: "/ip4/127.0.0.1/tcp/" + ppID[0] + "/p2p/" + ppID[1],
				State:         peer.ONLINE,
				PeerID:        ppID[len(ppID)-1],
				StreamAddress: peerID,
				Relation:      peer.NANO,
			})
		}
	}
	if conf.GetConfig().PeerIDS != "" {
		for _, peerID := range strings.Split(conf.GetConfig().PeerIDS, ",") {
			ppID := strings.Split(peerID, "/")
			ps = append(ps, &peer.Peer{
				AbstractObject: utils.AbstractObject{
					UUID: uuid.New().String(),
					Name: ppID[1],
				},
				PeerID:        ppID[len(ppID)-1],
				StreamAddress: peerID,
				Relation:      peer.PARTNER,
			})
		}
	}

	access := oclib.NewRequestAdmin(oclib.LibDataEnum(oclib.PEER), nil)
	peers := access.Search(nil, search, false)
	peers := access.Search(nil, search, false, 0, 0)
	for _, p := range peers.Data {
		ps = append(ps, p.(*peer.Peer))
	}
@@ -194,15 +245,11 @@ func (s *StreamService) StartGC(interval time.Duration) {
}

func (s *StreamService) gc() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.Mu.Lock()
	defer s.Mu.Unlock()
	now := time.Now().UTC()

	if s.Streams[ProtocolHeartbeatPartner] == nil {
		s.Streams[ProtocolHeartbeatPartner] = map[pp.ID]*common.Stream{}
	}
	streams := s.Streams[ProtocolHeartbeatPartner]
	for pid, rec := range streams {
	for pid, rec := range s.Streams[ProtocolHeartbeatPartner] {
		if now.After(rec.Expiry) {
			for _, sstreams := range s.Streams {
				if sstreams[pid] != nil {
@@ -214,33 +261,58 @@ func (s *StreamService) gc() {
	}
}

func (ps *StreamService) readLoop(s *common.Stream) {
func (ps *StreamService) readLoop(s *common.Stream, id pp.ID, proto protocol.ID, protocolInfo *common.ProtocolInfo) {
	defer s.Stream.Close()
	defer func() {
		ps.Mu.Lock()
		defer ps.Mu.Unlock()
		delete(ps.Streams[proto], id)
	}()
	loop := true
	if !protocolInfo.PersistantStream && !protocolInfo.WaitResponse { // two seconds is enough to wait for a response
		time.AfterFunc(2*time.Second, func() {
			loop = false
		})
	}
	for {
		if !loop {
			break
		}
		var evt common.Event
		if err := json.NewDecoder(s.Stream).Decode(&evt); err != nil {
			s.Stream.Close()
			// Any decode error (EOF, reset, malformed JSON) terminates the loop;
			// continuing on a dead/closed stream creates an infinite spin.
			if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) ||
				strings.Contains(err.Error(), "reset") ||
				strings.Contains(err.Error(), "closed") ||
				strings.Contains(err.Error(), "too many connections") {
				return
			}
			continue
		}
		ps.handleEvent(evt.Type, &evt)
		ps.handleEvent(evt.Type, &evt, s.Stream)
		if protocolInfo.WaitResponse && !protocolInfo.PersistantStream {
			break
		}
	}
}

func (abs *StreamService) FilterPeer(peerID string, search string) *dbs.Filters {
	id, err := oclib.GetMySelf()
func (abs *StreamService) FilterPeer(peerID string, groups []string, search string) *dbs.Filters {
	p, err := oclib.GetMySelf()
	if err != nil {
		return nil
	}
	groups = append(groups, "*")
	filter := map[string][]dbs.Filter{
		"creator_id": {{Operator: dbs.EQUAL.String(), Value: id}}, // is my resource...
		"abstractinstanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: p.GetID()}}, // is my resource...
		"": {{Operator: dbs.OR.String(), Value: &dbs.Filters{
			Or: map[string][]dbs.Filter{
				"abstractobject.access_mode": {{Operator: dbs.EQUAL.String(), Value: 1}}, // if public
				"abstractinstanciatedresource.abstractresource.abstractobject.access_mode": {{Operator: dbs.EQUAL.String(), Value: 1}}, // if public
				"abstractinstanciatedresource.instances": {{Operator: dbs.ELEMMATCH.String(), Value: &dbs.Filters{ // or has partner instances
					And: map[string][]dbs.Filter{
						"resourceinstance.partnerships": {{Operator: dbs.ELEMMATCH.String(), Value: &dbs.Filters{
							And: map[string][]dbs.Filter{
								"resourcepartnership.peer_groups." + peerID: {{Operator: dbs.EXISTS.String(), Value: true}},
								"resourcepartnership.peer_groups." + peerID: {{Operator: dbs.IN.String(), Value: groups}},
							},
						}}},
					},
@@ -248,15 +320,15 @@ func (abs *StreamService) FilterPeer(peerID string, search string) *dbs.Filters
		},
	}}},
	}

	if search != "" {
		filter[" "] = []dbs.Filter{{Operator: dbs.OR.String(), Value: &dbs.Filters{
			Or: map[string][]dbs.Filter{ // match by name, type, short_description, description, or owner when no filters are provided
				"abstractintanciatedresource.abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
				"abstractintanciatedresource.abstractresource.type": {{Operator: dbs.LIKE.String(), Value: search}},
				"abstractintanciatedresource.abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
				"abstractintanciatedresource.abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
				"abstractintanciatedresource.abstractresource.owners.name": {{Operator: dbs.LIKE.String(), Value: search}},
				"abstractintanciatedresource.abstractresource.abstractobject.creator_id": {{Operator: dbs.EQUAL.String(), Value: search}},
				"abstractinstanciatedresource.abstractresource.abstractobject.name": {{Operator: dbs.LIKE.String(), Value: search}},
				"abstractinstanciatedresource.abstractresource.type": {{Operator: dbs.LIKE.String(), Value: search}},
				"abstractinstanciatedresource.abstractresource.short_description": {{Operator: dbs.LIKE.String(), Value: search}},
				"abstractinstanciatedresource.abstractresource.description": {{Operator: dbs.LIKE.String(), Value: search}},
				"abstractinstanciatedresource.abstractresource.owners.name": {{Operator: dbs.LIKE.String(), Value: search}},
			},
		}}}
	}

@@ -1,23 +1,33 @@
#!/bin/bash

IMAGE_BASE_NAME="oc-discovery"
DOCKERFILE_PATH="."

for i in {0..3}; do
docker network create \
    --subnet=172.40.0.0/24 \
    discovery

for i in $(seq ${1:-0} ${2:-3}); do
    NUM=$((i + 1))
    PORT=$((4000 + $NUM))

    IMAGE_NAME="${IMAGE_BASE_NAME}:${NUM}"

    echo "▶ Building image ${IMAGE_NAME} with CONF_NUM=${NUM}"
    docker build \
        --build-arg CONF_NUM=${NUM} \
        -t ${IMAGE_NAME} \
        -t "${IMAGE_BASE_NAME}_${NUM}" \
        ${DOCKERFILE_PATH}

    docker kill "${IMAGE_BASE_NAME}_${NUM}" || true
    docker rm "${IMAGE_BASE_NAME}_${NUM}" || true

    echo "▶ Running container ${IMAGE_NAME} on port ${PORT}:${PORT}"
    docker run -d \
        --network="${3:-oc}" \
        -p ${PORT}:${PORT} \
        --name "${IMAGE_BASE_NAME}_${NUM}" \
        ${IMAGE_NAME}
        "${IMAGE_BASE_NAME}_${NUM}"

    docker network connect --ip "172.40.0.${NUM}" discovery "${IMAGE_BASE_NAME}_${NUM}"
done
@@ -1,7 +1,7 @@
version: '3.4'

services:
  oc-schedulerd:
  oc-discovery:
    image: 'oc-discovery:latest'
    ports:
      - 9002:8080

@@ -4,5 +4,5 @@
	"NATS_URL": "nats://nats:4222",
	"NODE_MODE": "indexer",
	"NODE_ENDPOINT_PORT": 4002,
	"INDEXER_ADDRESSES": "/ip4/oc-discovery1/tcp/4001/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
	"INDEXER_ADDRESSES": "/ip4/172.40.0.5/tcp/4005/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
}
@@ -4,5 +4,6 @@
	"NATS_URL": "nats://nats:4222",
	"NODE_MODE": "node",
	"NODE_ENDPOINT_PORT": 4003,
	"INDEXER_ADDRESSES": "/ip4/oc-discovery2/tcp/4002/p2p/12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u"
	"NAME": "opencloud-demo-1",
	"INDEXER_ADDRESSES": "/ip4/172.40.0.2/tcp/4002/p2p/12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u"
}
@@ -1,9 +1,9 @@
{
	"MONGO_URL":"mongodb://mongo:27017/",
	"MONGO_URL":"mongodb://mongo2:27017/",
	"MONGO_DATABASE":"DC_myDC",
	"NATS_URL": "nats://nats:4222",
	"NATS_URL": "nats://nats2:4222",
	"NODE_MODE": "node",
	"NODE_ENDPOINT_PORT": 4004,
	"INDEXER_ADDRESSES": "/ip4/oc-discovery1/tcp/4001/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu",
	"PEER_IDS": "/ip4/oc-discovery3/tcp/4003/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
}
	"NAME": "opencloud-demo-2",
	"INDEXER_ADDRESSES": "/ip4/172.40.0.5/tcp/4005/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
}

@@ -2,5 +2,6 @@
	"MONGO_URL":"mongodb://mongo:27017/",
	"MONGO_DATABASE":"DC_myDC",
	"NATS_URL": "nats://nats:4222",
	"NODE_MODE": "indexer"
	"NODE_MODE": "indexer",
	"NODE_ENDPOINT_PORT": 4005
}
9
docker_discovery6.json
Normal file
@@ -0,0 +1,9 @@
{
	"MONGO_URL":"mongodb://mongo3:27017/",
	"MONGO_DATABASE":"DC_myDC",
	"NATS_URL": "nats://nats3:4222",
	"NODE_MODE": "node",
	"NODE_ENDPOINT_PORT": 4006,
	"NAME": "opencloud-demo-3",
	"INDEXER_ADDRESSES": "/ip4/172.40.0.5/tcp/4005/p2p/12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"
}
472
docs/COMPARAISON_SYSTEMES_ET_PROPOSITION.md
Normal file
@@ -0,0 +1,472 @@
# Comparison Between Existing Systems and the Proposed Sovereign Decentralized Architecture

**Category**: Distributed systems, comparative review and architectural positioning
**Application domain**: Embedded systems, space context, hostile networks, decentralized resource markets
**Version**: 1.1 (March 2026)

---

## Abstract

This document presents a systematic comparative analysis between the main existing decentralized systems, from P2P discovery to blockchains, and the proposed architecture for a sovereign decentralized network operating in an embedded, hostile context (hereafter "the proposal"). The frame of reference is the GARDEN project (Generic Architecture for Resilient Decentralized Execution Networks), which requires a combination of properties rarely satisfied simultaneously by a single system: decentralized peer discovery, data sovereignty, transactional confidentiality, tolerance to network disruptions (DTN), and management of consortia with heterogeneous trust levels.

The analysis shows that no existing system meets all of these requirements at once. IPFS/libp2p offers mature decentralized discovery but no native confidentiality. Hyperledger Fabric offers a consortium blockchain with private channels but assumes continuous connectivity. Secret Network offers confidential smart contracts but relies on a hardware dependency (Intel SGX). Decentralized marketplace systems (Golem, Akash) provide market mechanisms but address neither DTN tolerance nor relative trust management.

The proposal stands out by integrating mechanisms drawn from several strands of the literature: multidimensional behavioral scoring inspired by the SWIM protocols [Das et al., 2002], a hybrid blockchain architecture combining a permissioned intra-consortium blockchain with confidential inter-consortium settlement, and DTN adaptations derived from RFC 4838 [Cerf et al., 2007]. This architectural synthesis constitutes an original contribution relative to the state of the art.

A structuring point of the proposal, absent from most existing systems, is the **explicit separation between the peer-to-peer discovery and exchange system** (AP profile: high availability, eventual consistency) and the **monetary settlement system** (CP profile: strong consistency, deterministic finality). These two subsystems have mutually incompatible requirements: merging them into a single protocol would degrade the guarantees of both. The comparison table includes this separation criterion.

The detailed divergences between the proposal and a reference system under development (with criticality, development options, and roadmap) are documented in `NOTE_DIVERGENCE_SYSTEME_REFERENCE.md`.

---

## 1. Comparison Criteria

The following criteria are used for the comparative analysis. Each criterion is justified against the requirements of the GARDEN context.

| Criterion | Definition | GARDEN justification |
|---|---|---|
| **Decentralization** | No single point of control or failure | Hostile context: any centralized point is a priority target |
| **Confidentiality** | Opacity of both content AND metadata to unauthorized observers | Passive watchers, adversarial actors in the network |
| **Sybil resistance** | Ability to resist the mass creation of fake identities [Douceur, 2002] | Network potentially infiltrated by adversaries |
| **DTN tolerance** | Operability under intermittent connectivity, high latency, and prolonged partitions [Fall, 2003] | Structural constraint of space and embedded settings |
| **Transaction finality** | Delay and certainty of irreversibility for monetary transactions | Double-spend prevention in DTN contexts |
| **Smart contracts** | Ability to automate settlement conditional on verifiable events | Resource market: automatic payment upon attested delivery |
| **Discovery/settlement separation** | Explicitly distinct architecture between the P2P discovery layer (AP) and the monetary settlement layer (CP) | Mutually incompatible requirements; any fusion degrades the guarantees of both |
| **Scalability** | Scaling without prohibitive performance degradation | Multi-cluster, multi-organization |
| **Consortium management** | Mechanisms for admission, revocation, and differentiated trust levels | Multi-organization coalitions with heterogeneous trust |
| **Sovereignty** | The creator's control over the life cycle of their data and identities | Fundamental requirement of the space/military context |
| **Maturity** | Production availability, documentation, ecosystem | Feasibility of implementation within a reasonable timeframe |
| **Embedded adaptability** | Operation under severe energy and bandwidth constraints | Resource-constrained space nodes |

---

## 2. P2P Discovery and Storage Systems

### 2.1 IPFS / Libp2p (Benet, 2014)

**Principle**: IPFS (InterPlanetary File System) is a content-addressed distributed file system. Each data block is identified by its cryptographic hash (Content Identifier, CID), providing intrinsic integrity verification and automatic deduplication. The discovery layer relies on a public Kademlia DHT [Maymounkov & Mazières, 2002] implemented in libp2p.

**Strengths**:
- Mature decentralized architecture, deployed at very large scale
- Native integrity verification through content addressing
- Modular libp2p ecosystem (multiple transports: TCP, QUIC, WebTransport, Bluetooth)
- Active community, extensive documentation
- Native support for multi-transport connections and NAT traversal

**Weaknesses**:
- No native confidentiality: CIDs in the public DHT reveal which contents are searched for and shared
- No access control on the discovery layer: any node can join the public DHT
- Sybil resistance unresolved in the public DHT
- No peer trust or scoring mechanism
- Discovery latencies incompatible with severe DTN constraints

**GARDEN fit**: Unsuitable as-is. The public IPFS DHT is a massive source of information leakage in a hostile context. The fact that a set of nodes is searching for resources of a specific type reveals strategic information to any observer participating in the DHT. Additional confidentiality layers (content encryption, a private DHT with an isolated namespace) can make libp2p usable as a transport layer, but IPFS in its standard form is unsuitable.
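
To make content addressing concrete, the sketch below derives an identifier from the data itself with a plain SHA-256 digest. This is a simplification: real IPFS CIDs add multihash and multibase encoding on top. Two nodes that hash the same block derive the same key, which is what gives integrity verification and deduplication for free.

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// key derives a content address: the identifier is the hash of the data,
// so any tampering with the block changes its identifier.
func key(block []byte) string {
	sum := sha256.Sum256(block)
	return hex.EncodeToString(sum[:])
}

func main() {
	store := map[string][]byte{}
	a := []byte("resource descriptor v1")
	b := []byte("resource descriptor v1") // identical content

	store[key(a)] = a
	store[key(b)] = b // same key: deduplicated automatically

	fmt.Println(len(store), "block stored under", key(a)[:12], "...")
}
```

The leakage issue noted above follows directly: the key is derived from the content, so announcing or requesting it in a public DHT tells every observer exactly which content is involved.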

### 2.2 Filecoin (Protocol Labs, 2017)

**Principle**: Filecoin is a decentralized storage market built on IPFS. Storage providers commit to storing data by submitting cryptographic proofs: Proof of Replication (PoRep) proves that a unique copy of a file is actually stored, and Proof of Spacetime (PoSt) proves that the storage is maintained over time. The storage market is settled through smart contracts on the Filecoin chain.
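
The sketch below is a toy challenge-response, nowhere near the real PoRep/PoSt constructions (which involve sealed sectors and succinct proofs); it only illustrates the shape of the interaction: the verifier issues a fresh nonce, and a prover can only answer correctly by actually holding the replica. In real systems the verifier checks a succinct proof rather than recomputing the digest itself.

```go
package main

import (
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
)

// respond binds the stored replica to a fresh verifier nonce; without the
// replica bytes, the correct digest cannot be produced.
func respond(replica, nonce []byte) []byte {
	h := sha256.New()
	h.Write(replica)
	h.Write(nonce)
	return h.Sum(nil)
}

func main() {
	replica := []byte("sealed copy of the client's file")

	// Verifier side: pick an unpredictable challenge.
	nonce := make([]byte, 16)
	rand.Read(nonce)

	expected := respond(replica, nonce) // what the verifier expects
	answer := respond(replica, nonce)   // prover must read the replica to answer

	fmt.Println("storage attested:", bytes.Equal(expected, answer))
}
```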

**Strengths**:
- Cryptoeconomic incentives aligning provider behavior with user interests
- Verifiable cryptographic proofs of storage execution
- Decentralized, with a free market for capacity

**Weaknesses**:
- On-chain Filecoin transactions are entirely public: storage deals reveal who stores what for whom
- High latencies incompatible with real-time workloads
- Complexity of the cryptographic proofs: significant compute load on nodes
- No relative trust management between peers; no consortium support

**GARDEN fit**: The model of cryptographic proofs of resource consumption is conceptually relevant and is an inspiration for the proposal's attestation layer. However, the transparency of on-chain Filecoin transactions and the lack of native confidentiality make this system unsuitable for a hostile context.

### 2.3 BitTorrent DHT (Loewenstern & Norberg, 2008)

**Principle**: The BitTorrent DHT protocol (BEP 5) implements a Kademlia DHT for discovering peers that share torrents. Each node maintains a routing table of peers close to it in Kademlia's XOR space, making it possible to locate peers that have announced a specific torrent.
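
A worked example of the XOR metric: a node keeps the peers whose IDs are closest to a target in XOR space. The sketch below (20-byte IDs as in BEP 5, big-endian comparison) shows the distance function and a closest-peer selection.

```go
package main

import (
	"bytes"
	"fmt"
)

// distance is the Kademlia XOR metric over 20-byte node IDs (BEP 5).
func distance(a, b [20]byte) [20]byte {
	var d [20]byte
	for i := range d {
		d[i] = a[i] ^ b[i]
	}
	return d
}

// closest returns the peer whose ID minimizes the XOR distance to target.
func closest(target [20]byte, peers [][20]byte) [20]byte {
	best := peers[0]
	bestD := distance(target, best)
	for _, p := range peers[1:] {
		if d := distance(target, p); bytes.Compare(d[:], bestD[:]) < 0 {
			best, bestD = p, d
		}
	}
	return best
}

func main() {
	target := [20]byte{0x0f}
	peers := [][20]byte{{0x0e}, {0xf0}, {0x1f}}
	// 0x0e wins: XOR distance 0x01, versus 0x10 and 0xff for the others.
	fmt.Printf("closest to %x...: %x...\n", target[:1], closest(target, peers)[:1])
}
```

The Sybil problem is also visible here: node IDs are self-assigned, so an adversary can generate IDs arbitrarily close to any target and capture its neighborhood.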

**Strengths**:
- Very mature system, deployed across hundreds of millions of nodes
- Exceptional resilience demonstrated at large scale
- Simple, well-documented protocol

**Weaknesses**:
- No node authentication: any public key can be announced by anyone
- No confidentiality: DHT queries reveal which contents are being searched for
- No trust or reputation mechanism
- Zero Sybil resistance

**GARDEN fit**: Unsuitable. BitTorrent DHT is a useful historical reference for understanding the scalability and resilience properties of Kademlia DHTs, but its security properties are incompatible with any hostile context.

### 2.4 Tor (Dingledine, Mathewson & Syverson, 2004)

**Principle**: Tor (The Onion Router) is an onion-routing anonymity network: each message is encrypted in several layers and routed through three relay nodes (entry guard, middle relay, exit node), so that no single relay knows both the source and the destination of a message. Hidden services (.onion) make it possible to operate servers without revealing their IP address.
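
To see why no single relay links source and destination, here is a minimal sketch of the layered encryption itself, with one AES-GCM layer per relay. Real Tor negotiates per-hop circuit keys through a handshake and uses fixed-size cells; the pre-shared keys here are a deliberate simplification.

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

// seal wraps plaintext in one AES-GCM layer under key (nonce prepended).
func seal(key, plaintext []byte) []byte {
	block, _ := aes.NewCipher(key)
	gcm, _ := cipher.NewGCM(block)
	nonce := make([]byte, gcm.NonceSize())
	rand.Read(nonce)
	return gcm.Seal(nonce, nonce, plaintext, nil)
}

// open peels exactly one layer.
func open(key, sealed []byte) []byte {
	block, _ := aes.NewCipher(key)
	gcm, _ := cipher.NewGCM(block)
	nonce, ct := sealed[:gcm.NonceSize()], sealed[gcm.NonceSize():]
	pt, err := gcm.Open(nil, nonce, ct, nil)
	if err != nil {
		panic(err)
	}
	return pt
}

func main() {
	// One symmetric key per relay: entry, middle, exit.
	keys := make([][]byte, 3)
	for i := range keys {
		keys[i] = make([]byte, 32)
		rand.Read(keys[i])
	}

	// The sender wraps innermost-first: the exit relay's layer goes on first.
	msg := []byte("payload for the destination")
	for i := len(keys) - 1; i >= 0; i-- {
		msg = seal(keys[i], msg)
	}

	// Each relay peels exactly one layer and forwards the remainder: it sees
	// only its predecessor, its successor, and an opaque inner blob.
	for i := 0; i < len(keys); i++ {
		msg = open(keys[i], msg)
	}
	fmt.Println(string(msg))
}
```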
|
||||
|
||||
**Forces** :
|
||||
- Confidentialité de transport forte, résistance démontrée contre les observateurs passifs non globaux
|
||||
- Résistance à la surveillance ciblée sur les liaisons intermédiaires
|
||||
- Écosystème bien documenté, client libre largement déployé
|
||||
|
||||
**Faiblesses** :
|
||||
- Latences élevées (100–500ms en moyenne) introduites par le routage multi-sauts
|
||||
- Vulnérable aux attaques par analyse de trafic global (corrélation temporelle) [Murdoch & Danezis, 2005]
|
||||
- Pas de découverte décentralisée de pairs : les directory authorities constituent un point centralisé
|
||||
- Pas de transactions monétaires ni de smart contracts
|
||||
- Connectivité TCP continue requise : incompatible avec les contraintes DTN
|
||||
|
||||
**Compatibilité GARDEN** : Pertinent comme couche de transport pour masquer les métadonnées de communication entre nœuds, en particulier sur les liens exposés à des observateurs passifs. Cependant, Tor ne constitue pas une infrastructure complète pour un système de découverte et de marché de ressources.
|
||||
|
||||
### 2.5 I2P (Invisible Internet Project)
|
||||
|
||||
**Principe** : I2P est un réseau mixnet utilisant le routage en ail (garlic routing), une variante du routage en oignon où plusieurs messages sont agrégés dans une même "gousse d'ail" pour réduire la corrélation temporelle. I2P est plus décentralisé que Tor (pas de directory authorities) et optimisé pour la communication bidirectionnelle (trafic interne au réseau).
|
||||
|
||||
**Forces** :
|
||||
- Plus décentralisé que Tor (distributed network database)
|
||||
- Meilleure résistance à l'analyse de trafic pour le trafic bidirectionnel
|
||||
- Réseau auto-suffisant sans dépendance aux directory authorities
|
||||
|
||||
**Faiblesses** :
|
||||
- Écosystème et communauté plus petits que Tor
|
||||
- Performances de latence comparables à Tor
|
||||
- Documentation moins développée
|
||||
- Pas adapté aux contraintes DTN
|
||||
|
||||
**Compatibilité GARDEN** : Alternative crédible à Tor pour la couche de transport anonymisant, avec l'avantage d'une moindre dépendance à des points centralisés. Comme Tor, ne constitue pas une infrastructure complète.
|
||||
|
||||
---
|
||||
|
||||
## 3. Systèmes de Computation et Marketplace Décentralisée
|
||||
|
||||
### 3.1 Golem (Golem Network, 2016)

**Principle**: Golem is a decentralized compute marketplace built on Ethereum. Requestors submit computation tasks described in a standardized format; providers execute them and receive payment in GLM tokens. Correct execution is verified through cryptographic challenges on the results.

**Strengths**:

- Open market for compute resources
- Challengeable proof of computation for result verification
- Decentralized, no central operator

**Weaknesses**:

- Ethereum transactions are fully public: who pays whom, for what kind of task, and for how much
- Ethereum network latency (probabilistic finality) is incompatible with real-time workloads
- No DTN support
- No relative-trust management and no consortium support
- No confidentiality for the executed tasks

**GARDEN compatibility**: Insufficient for a hostile context. The compute-marketplace model is conceptually relevant, but the lack of confidentiality and the dependence on public Ethereum make the system unsuitable.

### 3.2 Akash Network

**Principle**: Akash Network is a decentralized Kubernetes deployment market built on the Cosmos SDK. Compute providers publish their Kubernetes resource offers; users submit standardized deployment manifests (SDL: Stack Definition Language); a reverse-auction mechanism awards deployments to the cheapest providers.

**Strengths**:

- Architecturally compatible with Kubernetes (the standard container-orchestration tool)
- Fast finality via Tendermint BFT (~6 seconds)
- Decentralized deployment market with transparent auctions
- Cosmos ecosystem with IBC interoperability

**Weaknesses**:

- Public transactions on the Akash chain: deployments are fully visible
- Binary trust model (certified / non-certified provider)
- No native confidentiality for deployed workloads
- No DTN support
- Limited trust governance: no tiered-trust consortium management

**GARDEN compatibility**: Architecturally close to what GARDEN requires (decentralized Kubernetes), but missing the indispensable confidentiality and relative-trust layers. Cosmos interoperability is a potential compatibility point.

### 3.3 iExec (2017)

**Principle**: iExec is a decentralized compute marketplace that integrates Intel SGX Trusted Execution Environments (TEE) for confidential task execution. Ethereum smart contracts manage the task lifecycle and payments in RLC tokens. The TEE guarantees that neither the compute provider nor any observer can access the content of the processed data.

**Strengths**:

- Native confidential execution via Intel SGX: the compute provider cannot see the processed data
- Decentralized marketplace with TEE attestations
- Ethereum smart contracts for payment automation

**Weaknesses**:

- Critical dependence on Intel SGX: documented side-channel vulnerabilities (Spectre-NG, SGAxe, LVI) [Costan & Devadas, 2016]
- Public marketplace transactions on Ethereum L1
- No DTN support
- Centralized governance of the TEE network

**GARDEN compatibility**: The TEE model for confidential execution is relevant to the GARDEN context, particularly for attesting resource consumption (the oracle problem). The Intel SGX dependency, however, is a problematic hardware dependence for embedded space nodes whose supply chain may be compromised.

### 3.4 Ocean Protocol (2019)

**Principle**: Ocean Protocol is a decentralized data market built on Ethereum. Its "Compute-to-Data" mechanism lets a data provider offer access to its data for computation without ever transferring it: the computation runs as close to the data as possible, and only the result is returned to the requester.

**Strengths**:

- The Compute-to-Data model respects the data provider's sovereignty
- Tokenization of data assets (ERC-20 datatokens)
- Decentralized, active ecosystem

**Weaknesses**:

- Public marketplace transactions on Ethereum
- Trust rests on audited smart contracts, not on cryptographic proofs of execution
- No DTN support
- No tiered-trust consortium management

**GARDEN compatibility**: The Compute-to-Data model is an interesting approach for preserving data sovereignty in a decentralized market. Ethereum's transparency and the lack of relative-trust management, however, limit its direct applicability.

---

## 4. Blockchain Systems

### 4.1 Bitcoin (Nakamoto, 2008)

**Principle**: Bitcoin is the first peer-to-peer monetary settlement system without a trusted third party. Its security rests on Proof of Work, which achieves consensus on transaction ordering without a central authority. Finality is probabilistic: a transaction is considered irreversible after ~6 confirmations (~1 hour).

**Strengths**:

- Security proven over 15+ years of continuous operation without a protocol-level compromise
- Maximal decentralization (no central operator)
- Demonstrated censorship resistance

**Weaknesses**:

- No expressive smart contracts (Script is very limited)
- Final latency (~1 hour) incompatible with operational constraints
- Pseudonymous transactions traceable through graph analysis [Meiklejohn et al., 2013]
- Proof of Work: prohibitive energy consumption for an embedded context
- No consortium management

**GARDEN compatibility**: Unsuitable for programmatic settlement and confidentiality. Of theoretical reference value only.

### 4.2 Ethereum (Buterin, 2014; Wood, 2014)

**Principle**: Ethereum is the first general-purpose smart-contract platform. The EVM (Ethereum Virtual Machine) executes Turing-complete programs encoded in on-chain transactions. The migration to Proof of Stake (The Merge, 2022) cut energy consumption by ~99.95%.

**Strengths**:

- EVM: the most mature smart-contract platform, with a massive DeFi ecosystem
- Large developer and tooling ecosystem
- Emerging L2s (ZK-rollups, Optimistic rollups) improving scalability and, potentially, confidentiality

**Weaknesses**:

- Total transparency: all transactions and smart-contract state are public
- MEV (Miner/Maximal Extractable Value) creates market distortions [Daian et al., 2020]
- Slow finality on L1 (~12 s slots, ~15 minutes to strong economic finality)
- No native consortium management
- Confidential ZK L2s still under active development

**GARDEN compatibility**: Unsuitable as an L1 on its own. Confidential ZK L2s (Aztec) are a promising line of development, but their maturity is insufficient for operational deployment in 2026. Relevant as a reference substrate for the EVM ecosystem.

### 4.3 Hyperledger Fabric (Androulaki et al., 2018)

**Principle**: Hyperledger Fabric is a permissioned blockchain designed for enterprise consortiums. Its architecture distinguishes orderers (responsible for transaction ordering), peers (executing chaincode and maintaining the ledger), and MSPs (Membership Service Providers, responsible for member identity). Private channels let subgroups of members transact confidentially with respect to the rest of the network.

**Strengths**:

- Private channels: native confidentiality for sub-consortiums
- Deterministic finality: no forks possible with Raft or PBFT
- Modular consensus: the consensus mechanism is configurable to fit the need
- Chaincode (smart contracts) in standard languages (Go, Java, Node.js)
- Fine-grained governance through MSPs

**Weaknesses**:

- Permissioned: requires trusting the MSP operators (not trustless)
- Governance is centralized by design: the administrative structure must be trusted
- No native support for intermittent connectivity: continuous TCP is required between peers and orderers
- High administration complexity

**GARDEN compatibility**: Very well suited to closed consortiums (intra-consortium in the proposal). Deterministic finality and private channels make it the most mature candidate for intra-consortium settlement. The dependence on continuous TCP connectivity is the main gap with respect to DTN constraints.

### 4.4 Cosmos / IBC (Kwon & Buchman, 2016)

**Principle**: Cosmos is an ecosystem of sovereign, interoperable blockchains. Each "zone" is an independent blockchain with its own consensus (typically Tendermint BFT), its own governance, and its own smart contracts (CosmWasm). Zones communicate through the IBC (Inter-Blockchain Communication) protocol, which enables token and message transfers between heterogeneous blockchains.

**Strengths**:

- Sovereign zones: each consortium can run its own blockchain under its own governance
- Fast finality: Tendermint BFT provides deterministic finality in ~6 seconds
- IBC: standardized interoperability between zones
- CosmWasm: smart contracts in Rust with stronger memory safety
- Large and active ecosystem

**Weaknesses**:

- No native confidentiality: IBC transactions are visible in the logs of both zones involved
- Limited inter-zone trust model: no standardized mechanism for relative trust
- Administrative complexity of operating a Cosmos zone

**GARDEN compatibility**: An excellent foundation for a multi-consortium architecture. Each consortium can operate its own zone under its own governance, with inter-consortium exchanges flowing over IBC. The lack of native confidentiality can be compensated at the application level (encrypting data before on-chain submission) or by using confidential zones (Secret Network, itself a Cosmos zone).

### 4.5 Secret Network

**Principle**: Secret Network is a Cosmos-based blockchain that uses Intel SGX TEE enclaves for confidential smart-contract execution. Transaction inputs, smart-contract state, and outputs are encrypted even from the validators themselves: only the contract logic is public. Developers write "secret contracts" in CosmWasm with confidentiality annotations.

**Strengths**:

- Native confidential smart contracts: no validator can access the data in the clear
- Cosmos compatibility: IBC interoperability with the Cosmos ecosystem
- Varied use cases: private DeFi, confidential DAOs, confidential bridges
- Tendermint finality (~6 seconds)

**Weaknesses**:

- Critical dependence on Intel SGX: potential side-channel vulnerabilities [Costan & Devadas, 2016]
- No confidentiality guarantee if SGX is compromised
- Higher development complexity than standard smart contracts
- Smaller ecosystem than Ethereum or Cosmos

**GARDEN compatibility**: A good candidate for confidential inter-consortium settlement in the proposal. The Intel SGX dependency is a limitation for embedded space nodes whose hardware supply chain may be compromised, or for which certified SGX hardware may simply be unavailable.

### 4.6 Zcash (Ben-Sasson et al., 2014; Hopwood et al., 2016)

**Principle**: Zcash is a confidential-payments blockchain that uses zk-SNARKs (Zero-Knowledge Succinct Non-interactive ARguments of Knowledge) to cryptographically hide the transferred amounts and the parties' addresses in "shielded" transactions. Unlike TEE-based solutions, confidentiality rests solely on cryptographic primitives, with no hardware dependency.

**Strengths**:

- Pure cryptographic confidentiality, with no hardware dependency
- zk-SNARKs verifiable by any node in O(1) computation
- Mature ZK technology, an academic and industrial reference
- The Sapling and Orchard upgrades made shielded proofs dramatically cheaper to generate

**Weaknesses**:

- No complex smart contracts: Zcash's Script language is very limited
- Shielded pools are underused (most transactions are unshielded), which shrinks the anonymity set
- Governance strongly centralized around the Electric Coin Company
- Not suited to automating conditional settlement in a resource market

**GARDEN compatibility**: Excellent for simple confidential payments between known parties. The lack of expressive smart contracts limits settlement automation in a complex resource market. Relevant as a payment layer for specific use cases that require no contract logic.

### 4.7 Oasis Network (Oasis Labs, 2020)

**Principle**: Oasis Network combines TEEs and blockchain through "ParaTimes": parallel execution environments with configurable confidentiality levels. The Sapphire ParaTime provides a confidential EVM (smart-contract state is encrypted inside SGX), allowing Solidity smart contracts to run with confidential internal data.

**Strengths**:

- Confidential EVM: Ethereum ecosystem compatibility plus confidentiality
- Modular architecture (multiple ParaTimes with different security levels)
- Cosmos-adjacent compatibility
- Separation of consensus and execution: better scalability

**Weaknesses**:

- Intel SGX dependency for confidential ParaTimes
- Younger ecosystem than Secret Network or Ethereum
- Architectural complexity (multiple ParaTimes to understand and operate)

**GARDEN compatibility**: A good compromise between expressive smart contracts (EVM) and confidentiality (TEE). The SGX dependency is the same limitation as for Secret Network. EVM compatibility eases porting of existing smart contracts.

---

## 5. Summary Comparison Table

| System | Decentralization | Confidentiality | Sybil resistance | DTN tolerance | Finality | Smart contracts | Discovery/settlement separation | Consortium | Maturity | Embedded |
|---|---|---|---|---|---|---|---|---|---|---|
| IPFS / Libp2p | ✓✓ | ✗ | ✗ | △ | N/A | N/A | ✗ (discovery only) | ✗ | ✓✓ | △ |
| Filecoin | ✓✓ | ✗ | △ | ✗ | △ | △ | △ (discovery + storage coupled) | ✗ | ✓ | ✗ |
| BitTorrent DHT | ✓✓ | ✗ | ✗ | △ | N/A | N/A | ✗ (discovery only) | ✗ | ✓✓ | △ |
| Tor | ✓ | ✓✓ | △ | ✗ | N/A | N/A | N/A (transport only) | ✗ | ✓✓ | ✗ |
| I2P | ✓✓ | ✓✓ | △ | ✗ | N/A | N/A | N/A (transport only) | ✗ | ✓ | ✗ |
| Golem | ✓ | ✗ | △ | ✗ | △ | △ | ✗ (coupled to Ethereum) | ✗ | ✓ | ✗ |
| Akash Network | ✓ | ✗ | △ | ✗ | ✓✓ | △ | ✗ (coupled to Cosmos) | △ | ✓ | △ |
| iExec | ✓ | ✓ (TEE) | △ | ✗ | △ | △ | ✗ (coupled to Ethereum) | ✗ | ✓ | ✗ |
| Ocean Protocol | ✓ | △ | △ | ✗ | △ | △ | ✗ (coupled to Ethereum) | ✗ | ✓ | ✗ |
| Bitcoin | ✓✓ | ✗ | ✓✓ | ✗ | ✗ | ✗ | N/A (settlement only) | ✗ | ✓✓ | ✗ |
| Ethereum L1 | ✓✓ | ✗ | ✓ | ✗ | △ | ✓✓ | N/A (settlement only) | ✗ | ✓✓ | ✗ |
| Ethereum L2 ZK | ✓ | ✓ | ✓ | ✗ | △ | ✓ | N/A (settlement only) | ✗ | △ | ✗ |
| Hyperledger Fabric | △ | ✓✓ | ✓✓ | ✗ | ✓✓ | ✓✓ | N/A (settlement only) | ✓✓ | ✓✓ | △ |
| Cosmos SDK + IBC | ✓✓ | △ | ✓ | ✗ | ✓✓ | ✓ | N/A (settlement only) | ✓✓ | ✓✓ | △ |
| Secret Network | ✓ | ✓✓ | ✓ | ✗ | ✓✓ | ✓✓ | N/A (settlement only) | ✓ | ✓ | △ |
| Zcash | ✓ | ✓✓ | ✓ | ✗ | △ | ✗ | N/A (settlement only) | ✗ | ✓✓ | ✗ |
| Oasis Network | ✓ | ✓✓ | ✓ | ✗ | ✓✓ | ✓✓ | N/A (settlement only) | △ | ✓ | △ |
| **Proposal** | **✓✓** | **✓✓** | **✓** | **✓✓** | **✓✓** | **✓✓** | **✓✓ (3 distinct planes)** | **✓✓** | **△** | **✓✓** |

*Key: ✓✓ excellent, ✓ good, △ partial or conditional, ✗ insufficient, N/A not applicable*

> **Reading the "Discovery/settlement separation" column**: Systems marked N/A implement only one of the two functions (discovery or settlement) and do not claim the other. Systems marked ✗ couple both into a single protocol or chain, creating unfavorable trade-offs. Only the proposal explicitly formalizes the separation into three independent planes with distinct CAP profiles.

---

## 6. Positioning the Proposal Against the State of the Art

### 6.1 No Integrated Solution Exists in the State of the Art

The comparative analysis reveals a structural gap in the state of the art: **no existing system simultaneously combines the full set of properties required by the GARDEN context**. Current systems specialize in a subset of properties, leaving critical gaps that follow from their design domain.

P2P discovery systems (IPFS, libp2p, BitTorrent DHT) excel at decentralization and scalability but ignore confidentiality and trust management. Anonymization networks (Tor, I2P) offer excellent transport confidentiality but provide neither decentralized discovery, nor monetary transactions, nor DTN tolerance. Consortium blockchains (Hyperledger Fabric) offer intra-consortium confidentiality and deterministic finality but assume continuous connectivity and do not integrate with a P2P discovery layer. Confidential blockchains (Secret Network, Oasis) offer confidential smart contracts but carry problematic hardware dependencies and do not address the discovery layer.

### 6.2 The Proposal as an Architectural Synthesis

The proposal differs from all existing systems in being a **multi-layer architectural synthesis**, combining:

1. **Discovery layer**: a private Kademlia DHT (inspired by libp2p) + multidimensional behavioral scoring (absent from all existing systems) + DTN adaptation (absent from all existing systems) + SWIM-style epidemic gossip [Das et al., 2002].

2. **Data layer**: signed records + end-to-end content encryption + ZK attestations (inspired by Filecoin PoSt, but with cryptographic confidentiality) + Compute-to-Data (inspired by Ocean Protocol, but with ZK proofs).

3. **Settlement layer**: a hybrid intra/inter-consortium architecture (Hyperledger Fabric + Secret Network / Cosmos + Secret Network) + tiered-trust consortium management (absent from all existing systems as a native property).

### 6.3 DTN Tolerance: The Most Universal Gap

DTN tolerance is the property most universally absent from existing systems. Every system analyzed assumes, implicitly or explicitly, continuous TCP connectivity:

- Kademlia DHTs assume peers are available often enough to maintain routing tables
- Blockchains assume connectivity between validators to reach consensus quorum
- Marketplace systems assume the contracting parties are available in near real time

The proposal addresses this gap by adapting every layer to DTN constraints: adaptive TTLs, store-and-forward, deferred signing, and accumulated asynchronous heartbeats. To our knowledge, this systematic adaptation is absent from every existing decentralized resource-market system.

---

## References

**[Androulaki et al., 2018]** Androulaki, E., et al. (2018). Hyperledger Fabric: a distributed operating system for permissioned blockchains. In *Proceedings of the 13th EuroSys Conference*, article 30.

**[Ben-Sasson et al., 2014]** Ben-Sasson, E., Chiesa, A., Garman, C., Green, M., Miers, I., Tromer, E., & Virza, M. (2014). Zerocash: Decentralized Anonymous Payments from Bitcoin. In *Proceedings of the 2014 IEEE S&P*, pp. 459–474.

**[Benet, 2014]** Benet, J. (2014). IPFS - Content Addressed, Versioned, P2P File System. *arXiv preprint* arXiv:1407.3561.

**[Brewer, 2000]** Brewer, E. A. (2000). Towards robust distributed systems. In *Proceedings of PODC 2000*, p. 7.

**[Buterin, 2014]** Buterin, V. (2014). *A Next-Generation Smart Contract and Decentralized Application Platform*. Ethereum Whitepaper. https://ethereum.org/whitepaper

**[Cerf et al., 2007]** Cerf, V., et al. (2007). Delay-Tolerant Networking Architecture. *RFC 4838*, IETF.

**[Costan & Devadas, 2016]** Costan, V., & Devadas, S. (2016). Intel SGX Explained. *IACR Cryptology ePrint Archive*, Report 2016/086.

**[Daian et al., 2020]** Daian, P., et al. (2020). Flash Boys 2.0. In *Proceedings of the 2020 IEEE S&P*, pp. 910–927.

**[Das et al., 2002]** Das, A., Gupta, I., & Motivala, A. (2002). SWIM: Scalable Weakly-consistent Infection-style Process Group Membership Protocol. In *Proceedings of DSN 2002*, pp. 303–312.

**[Dingledine, Mathewson & Syverson, 2004]** Dingledine, R., Mathewson, N., & Syverson, P. (2004). Tor: The Second-Generation Onion Router. In *Proceedings of the 13th USENIX Security Symposium*, pp. 303–320.

**[Douceur, 2002]** Douceur, J. R. (2002). The Sybil Attack. In *Proceedings of IPTPS 2002*, LNCS 2429, pp. 251–260.

**[Fall, 2003]** Fall, K. (2003). A delay-tolerant network architecture for challenged internets. In *Proceedings of SIGCOMM 2003*, pp. 27–34.

**[Fischer, Lynch & Paterson, 1985]** Fischer, M. J., Lynch, N. A., & Paterson, M. S. (1985). Impossibility of distributed consensus with one faulty process. *JACM*, 32(2), 374–382.

**[Gilbert & Lynch, 2002]** Gilbert, S., & Lynch, N. (2002). Brewer's conjecture and the feasibility of consistent, available, partition-tolerant web services. *ACM SIGACT News*, 33(2), 51–59.

**[Goldwasser, Micali & Rackoff, 1989]** Goldwasser, S., Micali, S., & Rackoff, C. (1989). The knowledge complexity of interactive proof systems. *SIAM Journal on Computing*, 18(1), 186–208.

**[Golem Network, 2016]** Golem Network (2016). *Golem: A decentralized computation network*. Whitepaper. https://golem.network/golem_whitepaper.pdf

**[Groth, 2016]** Groth, J. (2016). On the size of pairing-based non-interactive arguments. In *Proceedings of EUROCRYPT 2016*, LNCS 9666, pp. 305–326.

**[Heilman et al., 2015]** Heilman, E., Kendler, A., Zohar, A., & Goldberg, S. (2015). Eclipse Attacks on Bitcoin's Peer-to-Peer Network. In *Proceedings of the 24th USENIX Security Symposium*, pp. 129–144.

**[Hopwood et al., 2016]** Hopwood, D., Bowe, S., Hornby, T., & Wilcox, N. (2016). *Zcash Protocol Specification*. Electric Coin Company.

**[Kwon & Buchman, 2016]** Kwon, J., & Buchman, E. (2016). *Cosmos: A Network of Distributed Ledgers*. Whitepaper.

**[Lamport, Shostak & Pease, 1982]** Lamport, L., Shostak, R., & Pease, M. (1982). The Byzantine Generals Problem. *ACM TOPLAS*, 4(3), 382–401.

**[Loewenstern & Norberg, 2008]** Loewenstern, A., & Norberg, A. (2008). *DHT Protocol (BEP 5)*. BitTorrent Enhancement Proposal.

**[Luu et al., 2016]** Luu, L., Chu, D.-H., Olickel, H., Saxena, P., & Hobor, A. (2016). Making Smart Contracts Smarter. In *Proceedings of CCS 2016*, pp. 254–269.

**[Maymounkov & Mazières, 2002]** Maymounkov, P., & Mazières, D. (2002). Kademlia: A Peer-to-Peer Information System Based on the XOR Metric. In *Proceedings of IPTPS 2002*, LNCS 2429, pp. 53–65.

**[Meiklejohn et al., 2013]** Meiklejohn, S., et al. (2013). A Fistful of Bitcoins. In *Proceedings of IMC 2013*, pp. 127–140.

**[Murdoch & Danezis, 2005]** Murdoch, S. J., & Danezis, G. (2005). Low-Cost Traffic Analysis of Tor. In *Proceedings of the 2005 IEEE S&P*, pp. 183–195.

**[Nakamoto, 2008]** Nakamoto, S. (2008). Bitcoin: A Peer-to-Peer Electronic Cash System. https://bitcoin.org/bitcoin.pdf

**[Oasis Labs, 2020]** Oasis Labs (2020). *Oasis Network Primer*. https://oasisprotocol.org/primer

**[Perrin, 2018]** Perrin, T. (2018). *The Noise Protocol Framework*. https://noiseprotocol.org/noise.pdf

**[Protocol Labs, 2017]** Protocol Labs (2017). *Filecoin: A Decentralized Storage Network*. Whitepaper.

**[Stoica et al., 2003]** Stoica, I., et al. (2003). Chord: A scalable peer-to-peer lookup protocol for internet applications. *IEEE/ACM Transactions on Networking*, 11(1), 17–32.

**[Sun et al., 2017]** Sun, S.-F., Au, M. H., Liu, J. K., & Yuen, T. H. (2017). RingCT 2.0. In *Proceedings of ESORICS 2017*, LNCS 10493, pp. 456–474.

**[Szabo, 1997]** Szabo, N. (1997). Formalizing and securing relationships on public networks. *First Monday*, 2(9).

**[W3C, 2022]** W3C (2022). *Decentralized Identifiers (DIDs) v1.0*. https://www.w3.org/TR/did-core/

**[Wood, 2014]** Wood, G. (2014). *Ethereum: A Secure Decentralised Generalised Transaction Ledger*. Ethereum Yellow Paper.

**[Wood, 2016]** Wood, G. (2016). *Polkadot: Vision for a Heterogeneous Multi-Chain Framework*. Whitepaper.

406
docs/NOTE_DIVERGENCE_SYSTEME_REFERENCE.md
Normal file
@@ -0,0 +1,406 @@
# Divergence Note: Architectural Proposal vs. Current State of the Reference System

**Type**: Internal working note (gap analysis and roadmap)

**Version**: 1.0 — March 2026

**Relation**: This document is the operational companion to `PROPOSITION_ARCHITECTURE_MINIMALE.md` and `COMPARAISON_SYSTEMES_ET_PROPOSITION.md`. It is not an independent scientific study but a decision-support tool for the development team.

> **Convention**: The term "reference system" designates the P2P resource discovery and exchange system currently under development, whose discovery architecture is partially aligned with the proposal but which does not yet implement the monetary-settlement layer, hardened confidentiality, or the DTN adaptations. "The proposal" refers to the document `PROPOSITION_ARCHITECTURE_MINIMALE.md`.

---

## Executive Summary

| # | Gap | Criticality | Estimated effort | Priority |
|---|---|---|---|---|
| G1 | Monetary-settlement blockchain layer missing | 🔴 Critical | High (6–12 months) | P0 |
| G2 | DHT records not encrypted (payload confidentiality) | 🔴 Critical (hostile context) | Low (2–4 weeks) | P1 |
| G3 | No DTN tolerance | 🔴 Critical (space context) | Medium (2–4 months) | P1 |
| G4 | Binary consortium management (no level 2) | 🟠 Significant | Medium (1–3 months) | P2 |
| G5 | Sybil resistance is organizational only | 🟠 Significant (open networks) | Depends on G1 | P3 |
| G6 | No ZK consumption attestation | 🟠 Significant | High (4–8 months) | P2 |
| G7 | DID identity not implemented | 🟡 Partial | Medium (2–3 months) | P3 |
| G8 | Multidimensional scoring | ✅ Strong convergence | — | — |
| G9 | Gossip / SWIM membership events | ✅ Strong convergence | — | — |

**How to read this**: Gaps G1, G2, and G3 are blockers for deployment in a hostile or space context. G4 and G6 are needed for the resource marketplace. G5 and G7 are important improvements but not short-term blockers for closed-network deployments.

---

## Context: Two Systems with Distinct Requirements

The proposal formally distinguishes two subsystems whose requirements would be mutually incompatible if merged:

### Peer Discovery and Exchange System

**Role**: locate peers, assess their quality, maintain the neighborhood network, and propagate resource records.

**Requirements**:

- High availability (the AP profile of the CAP theorem)
- Tolerance to partitions and intermittent connectivity
- Low discovery latency (seconds, not minutes)
- Eventual consistency is acceptable
- Lightweight, binary, compact protocols

**What the reference system implements**: this subsystem is substantially developed. The Kademlia DHT, multidimensional scoring, bidirectional heartbeats, membership-event gossip, and the ConnectionGater are in place and aligned with the proposal.

### Monetary Settlement and Transaction System

**Role**: settle the financial transactions arising from marketplace resource consumption (compute, storage, data, workflows), in an automated and confidential way.

**Requirements**:

- Strong consistency (CP profile: double-spend prevention)
- Deterministic or near-deterministic finality
- Confidentiality of amounts and parties
- Smart contracts for automating conditional payment
- Consortium governance (validator admission and revocation)
- Low energy footprint (PoW excluded)

**What the reference system implements**: this subsystem is entirely absent. It is the most critical gap.

> **Fundamental principle**: these two systems MUST NOT share the same protocol. Discovery optimizes for availability; the blockchain optimizes for consistency. Merging the layers degrades the guarantees of both.

---

## G1 — Monetary-Settlement Blockchain Layer Missing

**Criticality**: 🔴 Critical — blocks the economic marketplace

### Current state of the reference system

The reference system has no monetary-settlement layer at all. It can discover resources, coordinate their use, and propagate presence records, but it cannot:

- Automate payment between a resource provider and a consumer
- Issue a token or represent a unit of value
- Execute a contract conditioned on an attested resource delivery
- Settle transactions between different organizations without out-of-band agreements

### Operational impact

Without a settlement layer, the resource marketplace (compute, storage, data, and workflows for rent/buy) can only operate through out-of-band commercial agreements, which cancels the benefit of decentralization on the economic side. A compute provider cannot be paid automatically upon delivery of a verified result.

### Development options

**Option A — Hyperledger Fabric (recommended for phase 1)**

- Permissioned blockchain, closed consortium
- Deterministic finality (Raft or PBFT), native private channels
- Mature Go/Node.js chaincode, CouchDB state store
- No public token required: a consortium-internal token suffices
- **Advantage**: maximal maturity, documentation, enterprise ecosystem
- **Drawback**: not trustless for inter-consortium exchanges
- **Effort**: 3–6 months for a working integration

**Option B — Cosmos SDK + permissioned zone (recommended if inter-consortium interoperability is the priority)**

- Sovereign Cosmos chain, Tendermint BFT consensus (~6 s finality)
- IBC for inter-zone (inter-consortium) exchanges
- CosmWasm for smart contracts
- **Advantage**: native interoperability with other Cosmos zones
- **Drawback**: no native inter-zone confidentiality, higher operational complexity than Fabric
- **Effort**: 4–8 months for a working integration

**Option C — Secret Network + Cosmos (recommended for confidential inter-consortium settlement)**

- Smart contracts encrypted via TEE (Intel SGX)
- Cosmos-compatible (IBC), Tendermint finality
- **Advantage**: native smart-contract confidentiality
- **Drawback**: SGX dependency (documented side channels), smaller ecosystem
- **Recommended use**: inter-consortium layer after phase 1 with intra-consortium Fabric

**Option D — Oasis Network (alternative to Secret for embedded environments)**

- Confidential ParaTime + EVM (Sapphire)
- Cosmos-compatible
- **Advantage**: mature EVM plus confidentiality, flexible ParaTime architecture
- **Drawback**: TEE dependency, younger ecosystem

### Recommended roadmap for G1

A minimal sketch of the phase-1 chaincode follows the roadmap below.

```
Phase 1 (immediate priority):
→ Hyperledger Fabric intra-consortium
→ Basic conditional-payment smart contracts (attestation → payment)
→ Interface between the discovery system and Fabric (consumption events)

Phase 2 (6–12 months):
→ Cosmos SDK permissioned zone for inter-consortium interoperability
→ IBC configured between the zones of the different consortiums

Phase 3 (12–24 months):
→ Secret Network or Oasis for confidential inter-consortium settlement
→ ZK proofs of consumption attestation (see G6)
```

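To make the Phase 1 "attestation → payment" step concrete, here is a hedged sketch of what such a chaincode could look like in Go with the `fabric-contract-api-go` package. The type and method names (`ResourceLease`, `OpenLease`, `SettleOnAttestation`) are illustrative assumptions, not the reference system's API, and the attestation check is a placeholder for the verification logic discussed under G6:

```go
// Hypothetical Phase-1 conditional-payment chaincode sketch (not the
// reference system's code). Attestation verification is stubbed out.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/hyperledger/fabric-contract-api-go/contractapi"
)

type SettlementContract struct {
	contractapi.Contract
}

// ResourceLease records an agreed resource consumption awaiting settlement.
type ResourceLease struct {
	LeaseID    string `json:"leaseId"`
	Provider   string `json:"provider"`
	Consumer   string `json:"consumer"`
	PriceUnits uint64 `json:"priceUnits"` // consortium-internal token units
	Settled    bool   `json:"settled"`
}

// OpenLease escrows the agreement on-chain before the resource is consumed.
func (c *SettlementContract) OpenLease(ctx contractapi.TransactionContextInterface,
	leaseID, provider, consumer string, price uint64) error {
	lease := ResourceLease{LeaseID: leaseID, Provider: provider,
		Consumer: consumer, PriceUnits: price}
	data, err := json.Marshal(lease)
	if err != nil {
		return err
	}
	return ctx.GetStub().PutState(leaseID, data)
}

// SettleOnAttestation marks the lease paid once a delivery attestation is
// presented. The check below is a placeholder: Phase 1 would verify the
// statistical challenge-response transcript described under G6, option C.
func (c *SettlementContract) SettleOnAttestation(ctx contractapi.TransactionContextInterface,
	leaseID, attestation string) error {
	data, err := ctx.GetStub().GetState(leaseID)
	if err != nil || data == nil {
		return fmt.Errorf("lease %s not found", leaseID)
	}
	var lease ResourceLease
	if err := json.Unmarshal(data, &lease); err != nil {
		return err
	}
	if lease.Settled {
		return fmt.Errorf("lease %s already settled", leaseID)
	}
	if attestation == "" { // placeholder attestation check
		return fmt.Errorf("missing attestation")
	}
	lease.Settled = true
	updated, err := json.Marshal(lease)
	if err != nil {
		return err
	}
	// A full contract would also move PriceUnits between account records here.
	return ctx.GetStub().PutState(leaseID, updated)
}

func main() {
	cc, err := contractapi.NewChaincode(&SettlementContract{})
	if err != nil {
		panic(err)
	}
	if err := cc.Start(); err != nil {
		panic(err)
	}
}
```

The escrow-then-settle split mirrors the CP requirement above: once the lease state transition is ordered, it is final, so the same lease cannot be paid twice.
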
### Dependencies

- G6 (ZK attestation) depends on G1: the proof of consumption cannot be implemented without a receiving smart contract
- G5 (cryptoeconomic Sybil resistance) depends on G1: on-chain stake requires a blockchain

---

## G2 — DHT Records Not Encrypted (Payload Confidentiality)

**Criticality**: 🔴 Critical in a hostile context — 🟡 Minor on a closed private network

### Current state of the reference system

Presence records (PeerRecords) in the DHT are signed (authentication and integrity are guaranteed) but not encrypted. Their content — peer name, API URL, network addresses, public key, advertised resources — is readable by any node participating in the DHT or observing network traffic.

### Operational impact

In a hostile context with passive watchers:

- An adversary can reconstruct the complete list of active participants
- Activity patterns (record-renewal frequency) leak operational information
- API URLs and network addresses enable targeting of specific actors
- Correlating records over time allows activity graphs to be reconstructed

### Development options

**Option A — Symmetric encryption under a consortium key (recommended)**

- The decryption key is derived from the consortium PSK
- The PeerRecord payload is encrypted with AES-256-GCM before insertion into the DHT
- Only members sharing the PSK can decrypt the records
- The DHT acts as a blind storage system
- **Effort**: 2–4 weeks (PeerRecord format change + encode/decode handlers)

**Option B — Asymmetric encryption with an access list (more flexible, more complex)**

- Each record is encrypted to the public keys of the authorized members
- Allows per-record, fine-grained access control
- **Drawback**: record size grows with the number of recipients; operational complexity

**Option C — Mixed mode: public metadata, encrypted payload**

- Only the sensitive payload is encrypted; routing metadata stays in the clear
- A trade-off between discoverability and confidentiality

### Recommendation

Option A for deployments in hostile contexts, enabled by a network configuration flag (backward-compatible with existing open-network deployments). The encryption is transparent to the DHT layer: the DHT stores opaque blobs without knowing their structure. A sketch of Option A follows.
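
As a minimal sketch of Option A, assume a 32-byte consortium PSK distributed out of band: derive a dedicated record key with HKDF, then seal the serialized payload with AES-256-GCM. The function names and the HKDF `info` string are illustrative assumptions, not the reference system's API:

```go
// Sketch of Option A: PSK-derived symmetric encryption of PeerRecord payloads.
package consortiumcrypto

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"io"

	"golang.org/x/crypto/hkdf"
)

// recordKeyInfo domain-separates the DHT-record key from other PSK uses.
const recordKeyInfo = "garden/dht-record/v1"

// deriveRecordKey derives a dedicated AES-256 key from the consortium PSK.
func deriveRecordKey(psk []byte) ([]byte, error) {
	key := make([]byte, 32)
	kdf := hkdf.New(sha256.New, psk, nil, []byte(recordKeyInfo))
	if _, err := io.ReadFull(kdf, key); err != nil {
		return nil, err
	}
	return key, nil
}

// EncryptPeerRecord seals a serialized PeerRecord payload with AES-256-GCM.
// The DHT then stores nonce||ciphertext as an opaque blob.
func EncryptPeerRecord(psk, plaintext []byte) ([]byte, error) {
	key, err := deriveRecordKey(psk)
	if err != nil {
		return nil, err
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	// GCM authenticates the ciphertext; the record signature still covers
	// the plaintext, so integrity is checked at both layers.
	return gcm.Seal(nonce, nonce, plaintext, nil), nil
}

// DecryptPeerRecord reverses EncryptPeerRecord for consortium members.
func DecryptPeerRecord(psk, blob []byte) ([]byte, error) {
	key, err := deriveRecordKey(psk)
	if err != nil {
		return nil, err
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	if len(blob) < gcm.NonceSize() {
		return nil, io.ErrUnexpectedEOF
	}
	nonce, ciphertext := blob[:gcm.NonceSize()], blob[gcm.NonceSize():]
	return gcm.Open(nil, nonce, ciphertext, nil)
}
```

Storing `nonce||ciphertext` keeps the DHT oblivious to the record structure, which is exactly the "blind storage" property described above.
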
---

## G3 — No DTN Tolerance

**Criticality**: 🔴 Critical for the space/embedded context

### Current state of the reference system

The reference system is designed for continuous TCP connectivity:

- Heartbeats every ~20 seconds, impossible in a DTN context
- A peer absent for a few intervals is marked suspect, then evicted
- Uptime scores are artificially degraded for peers with intermittent connectivity
- No store-and-forward mechanism for undeliverable messages
- PeerRecord TTLs (~2 minutes) are unsuited to disconnection cycles of several hours

### Operational impact in a space context

A LEO satellite loses ground coverage roughly every 90 minutes. With the current parameters:

- Every coverage gap triggers a burst of false-positive failure detections
- The satellite is systematically evicted from all of its peers' pools at every interruption
- On reconnection, a full rejoin (DHT bootstrap, re-heartbeat, re-scoring) is required, costly in bandwidth and time

### Development options

**Option A — Per-peer connectivity profile (recommended, minimal implementation)**

- Each peer declares a connectivity profile (`CONTINUOUS | INTERMITTENT | ORBITAL`)
- Failure-detection thresholds are parameterized per profile
- An `INTERMITTENT` peer can be absent for N hours without being marked suspect
- **Effort**: 3–6 weeks (PeerRecord extension + uptime-scoring change)

**Option B — Adaptive TTLs based on the orbital profile (complementary)**

- An orbital peer's PeerRecords get a TTL equal to (maximum predictable absence + margin)
- The peer renews its records just before each predicted disconnection window
- **Effort**: 1–2 weeks

**Option C — Signed heartbeat batches (for the strict DTN context)**, sketched just below

- During disconnection windows, heartbeats are signed locally with a timestamp
- On reconnection, they are submitted as a batch and the temporal sequence is validated
- Allows the uptime history to be reconstructed without continuous connectivity
- **Effort**: 4–8 weeks
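
A minimal sketch of Option C, assuming Ed25519 node keys: each offline heartbeat signs its own timestamp, and the receiving peer checks both the signatures and the strict temporal ordering at reconnection. Locally signed timestamps are only as trustworthy as the signer's clock; the names below are illustrative:

```go
// Sketch of Option C: heartbeats produced offline, verified as a batch.
package dtn

import (
	"crypto/ed25519"
	"encoding/binary"
)

type SignedHeartbeat struct {
	UnixSeconds int64
	Signature   []byte // over the 8-byte big-endian timestamp
}

// SignHeartbeat is called locally while disconnected.
func SignHeartbeat(priv ed25519.PrivateKey, ts int64) SignedHeartbeat {
	var msg [8]byte
	binary.BigEndian.PutUint64(msg[:], uint64(ts))
	return SignedHeartbeat{UnixSeconds: ts, Signature: ed25519.Sign(priv, msg[:])}
}

// VerifyBatch checks every signature and the strictly increasing timestamps,
// so a reconnecting peer can substantiate its claimed uptime history.
func VerifyBatch(pub ed25519.PublicKey, batch []SignedHeartbeat) bool {
	var prev int64 = -1 << 62
	for _, hb := range batch {
		if hb.UnixSeconds <= prev {
			return false
		}
		var msg [8]byte
		binary.BigEndian.PutUint64(msg[:], uint64(hb.UnixSeconds))
		if !ed25519.Verify(pub, msg[:], hb.Signature) {
			return false
		}
		prev = hb.UnixSeconds
	}
	return true
}
```
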
**Option D — Store-and-forward for critical messages**

- Undeliverable update messages are stored locally and retransmitted at the next contact window
- Aligned with RFC 4838 [Cerf et al., 2007]
- **Effort**: 2–4 months (significant infrastructure)

### Recommendation

Implement A + B first (low effort, immediate impact); a combined sketch of both follows. C and D form the long-term roadmap for full DTN support.
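
A combined sketch of Options A and B: the declared connectivity profile drives both the suspicion timeout and the record TTL. The constants are illustrative assumptions, not tuned parameters:

```go
// Sketch of Options A and B: profile-driven failure detection and TTLs.
package dtn

import "time"

type ConnectivityProfile int

const (
	Continuous ConnectivityProfile = iota
	Intermittent
	Orbital
)

// SuspicionTimeout returns how long a peer may stay silent before being
// marked suspect. The Orbital value is motivated by the ~90 min LEO cycle.
func SuspicionTimeout(p ConnectivityProfile) time.Duration {
	switch p {
	case Intermittent:
		return 6 * time.Hour
	case Orbital:
		return 2 * time.Hour // one orbit plus margin
	default:
		return 60 * time.Second // today's continuous-TCP behaviour
	}
}

// RecordTTL implements Option B: the PeerRecord TTL covers the longest
// predictable absence plus a safety margin, instead of a fixed ~2 minutes.
func RecordTTL(p ConnectivityProfile, maxPredictedAbsence time.Duration) time.Duration {
	base := 2 * time.Minute
	if p == Continuous {
		return base
	}
	return maxPredictedAbsence + maxPredictedAbsence/4 // +25% margin
}
```
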
---

## G4 — Binary Consortium Management (No Level 2)

**Criticality**: 🟠 Significant for multi-organization coalitions

### Current state of the reference system

The reference system implements a binary model: a PSK is either present or absent. A peer either belongs to the consortium (correct PSK → high trust) or is a stranger (no PSK → zero trust). There is no formalized intermediate level for peers from allied organizations that do not share the main PSK.

### Operational impact

In a multi-organization GARDEN coalition:

- Two allied organizations wishing to interoperate must either share their main PSK (degraded security) or treat each other as strangers (degraded functionality)
- No mechanism can express "this peer is moderately trusted: it may consume public resources but not critical ones"

### Development option

**Level-2 attestation (recommended)**, sketched below

- A level-1 peer (PSK holder) issues a signed attestation for an external peer
- The attestation contains: the peer's public key, a validity period, and access rights (restricted scope)
- Peers receiving a valid attestation signed by a level-1 member grant the subject an initial score of 40/100 (level 2)
- Attestations are revocable via a signed tombstone
- **Effort**: 4–8 weeks (new record type + admission handlers)
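
An illustrative shape for such an attestation record, signed with the issuer's Ed25519 key. The field and method names are assumptions, and the canonical encoding is simplified to struct-order JSON for the sketch:

```go
// Sketch of a level-2 attestation record issued by a level-1 member.
package trust

import (
	"crypto/ed25519"
	"encoding/json"
	"time"
)

type Level2Attestation struct {
	SubjectPubKey []byte    `json:"subjectPubKey"` // external peer being vouched for
	Scopes        []string  `json:"scopes"`        // e.g. "resources:public:read"
	NotBefore     time.Time `json:"notBefore"`
	NotAfter      time.Time `json:"notAfter"`
	IssuerPubKey  []byte    `json:"issuerPubKey"` // level-1 member
	Signature     []byte    `json:"signature"`    // over the payload below
}

// payload returns the bytes covered by the signature (Signature zeroed).
func (a *Level2Attestation) payload() []byte {
	unsigned := *a
	unsigned.Signature = nil
	b, _ := json.Marshal(unsigned) // deterministic for a fixed struct
	return b
}

func (a *Level2Attestation) Sign(priv ed25519.PrivateKey) {
	a.Signature = ed25519.Sign(priv, a.payload())
}

// Verify checks the issuer signature and the validity window; the caller
// must still confirm that IssuerPubKey belongs to a level-1 member and
// that no revocation tombstone exists for this attestation.
func (a *Level2Attestation) Verify(now time.Time) bool {
	if now.Before(a.NotBefore) || now.After(a.NotAfter) {
		return false
	}
	return ed25519.Verify(ed25519.PublicKey(a.IssuerPubKey), a.payload(), a.Signature)
}
```
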
---

## G5 — Sybil Resistance Is Organizational Only

**Criticality**: 🟠 Significant for open or hostile network deployments — 🟢 Acceptable for closed consortiums

### Current state of the reference system

The organizational PSK is an effective social entry barrier for closed networks. It is not cryptoeconomic (there is no financial cost of entry) but organizational (the PSK must be obtained from an existing member).

### Impact

For closed-network deployments (PSK consortium), this gap is not critical. For open-network deployments, or in contexts where the PSK could be compromised or widely shared, Sybil resistance becomes insufficient.

### Development options

- **On-chain stake** (depends on G1): require a token deposit on the blockchain as an admission condition. Costly to implement without G1.
- **Proof of Work on the NodeID** (S/Kademlia style [Baumgart & Meinert, 2007]): moderate computational cost at identity creation, independent of G1; a sketch follows. **Effort**: 2–3 weeks.
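
A minimal sketch of the S/Kademlia-style variant: a NodeID is accepted only if the hash of the public key plus a nonce carries enough leading zero bits, making identities expensive to mint but O(1) to verify. The difficulty value is an illustrative assumption:

```go
// Sketch of identity PoW: NodeID = SHA-256(pubKey || nonce) with a
// required number of leading zero bits.
package sybil

import (
	"crypto/sha256"
	"encoding/binary"
	"math/bits"
)

const difficultyBits = 20 // illustrative; tune to target hardware

// leadingZeroBits counts the leading zero bits of a 32-byte hash.
func leadingZeroBits(h [32]byte) int {
	total := 0
	for _, b := range h {
		if b == 0 {
			total += 8
			continue
		}
		total += bits.LeadingZeros8(b)
		break
	}
	return total
}

// MineNodeID searches for a nonce making the identity expensive to create.
func MineNodeID(pubKey []byte) (nonce uint64, id [32]byte) {
	for {
		buf := make([]byte, len(pubKey)+8)
		copy(buf, pubKey)
		binary.BigEndian.PutUint64(buf[len(pubKey):], nonce)
		id = sha256.Sum256(buf)
		if leadingZeroBits(id) >= difficultyBits {
			return nonce, id
		}
		nonce++
	}
}

// VerifyNodeID is O(1): recompute one hash and check the difficulty.
func VerifyNodeID(pubKey []byte, nonce uint64, id [32]byte) bool {
	buf := make([]byte, len(pubKey)+8)
	copy(buf, pubKey)
	binary.BigEndian.PutUint64(buf[len(pubKey):], nonce)
	return sha256.Sum256(buf) == id && leadingZeroBits(id) >= difficultyBits
}
```

Raising `difficultyBits` trades identity-creation cost against bootstrap time on constrained embedded nodes, so the value would likely be a per-deployment parameter.
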
---

## G6 — No ZK Attestation of Resource Consumption

**Criticality**: 🟠 Significant — needed for a credible decentralized resource market

### Current state of the reference system

No mechanism can cryptographically prove that a resource was actually consumed (compute executed, data delivered, storage maintained) without revealing the content or the initiator's identity. Transaction settlement rests entirely on organizational trust.

### Development options

**Option A — TEE attestations (Intel SGX / ARM TrustZone)**

- The provider node runs the workload inside a secure enclave
- The enclave produces an attestation, signed by the processor, proving the code ran in a protected environment
- The attestation is submitted to the smart contract as proof of delivery
- **Advantage**: relatively mature (iExec uses this approach)
- **Drawback**: hardware dependency, documented SGX vulnerabilities [Costan & Devadas, 2016]
- **Effort**: 3–6 months

**Option B — ZK proof of execution (purely cryptographic approach)**

- A ZKP proves correct execution of a program on given inputs
- No hardware dependency: purely cryptographic security
- **Drawback**: proof generation is still expensive for complex programs (zkEVM, zkVM)
- **Horizon**: viable in the medium term (2–3 years) for standard workloads

**Option C — Statistical challenge–response (short-term hybrid approach)**

- The consumer requests a random sample of verifiable intermediate results
- The provider proves it executed the computation by answering the challenges correctly
- Cryptographically weaker, but practicable immediately
- **Effort**: 4–6 weeks

### Recommendation

Option C as the short-term solution (practicable before G1 is finalized); a sketch follows. Option A as the medium-term solution (subject to TEE availability on the target nodes). Option B as the long-term horizon.
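
A hedged sketch of Option C: the provider pre-commits to per-chunk digests of its intermediate results, the consumer samples random indices, and the revealed chunks are checked against the commitments. All names are illustrative assumptions:

```go
// Sketch of Option C: commit to intermediate results, then spot-check.
package attest

import (
	"bytes"
	"crypto/rand"
	"crypto/sha256"
	"math/big"
)

// Commitment is the provider's pre-published digest list, one digest per
// intermediate-result chunk of the executed workload.
type Commitment struct {
	ChunkDigests [][32]byte
}

// SampleChallenges picks k distinct chunk indices uniformly at random.
func SampleChallenges(total, k int) ([]int, error) {
	picked := map[int]bool{}
	out := []int{}
	for len(out) < k && len(out) < total {
		n, err := rand.Int(rand.Reader, big.NewInt(int64(total)))
		if err != nil {
			return nil, err
		}
		i := int(n.Int64())
		if !picked[i] {
			picked[i] = true
			out = append(out, i)
		}
	}
	return out, nil
}

// VerifyOpenings checks that the chunks revealed by the provider match the
// committed digests. Roughly, a provider that faked a fraction f of the
// chunks escapes k independent checks with probability about (1-f)^k.
func VerifyOpenings(c Commitment, indices []int, revealed [][]byte) bool {
	if len(indices) != len(revealed) {
		return false
	}
	for j, i := range indices {
		if i < 0 || i >= len(c.ChunkDigests) {
			return false
		}
		d := sha256.Sum256(revealed[j])
		if !bytes.Equal(d[:], c.ChunkDigests[i][:]) {
			return false
		}
	}
	return true
}
```
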
---

## G7 — DID Identity Not Implemented

**Criticality**: 🟡 Partial — self-certifying identities present, formal DIDs absent

### Current state of the reference system

The reference system uses self-certifying identities (PeerID = hash of the Ed25519 public key), aligned with the proposal's core principle. Formal W3C DIDs [W3C, 2022] are not implemented, however, which limits interoperability with third-party ecosystems that rely on this standard.

### Impact

For intra-consortium deployments this gap is minor. For interoperability with third-party systems (identity federations, identity portability across deployments), the absence of formal DIDs is an obstacle.

### Development option

- A W3C DID wrapper over the existing PeerID identities: publish the peers' public keys as DID documents in the DHT (see the sketch below). **Effort**: 2–4 weeks.
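
A sketch of that wrapper, assuming a hypothetical project-specific `did:garden` method. The document layout follows the W3C DID Core data model; the multibase encoding of the key is taken as an input rather than computed here:

```go
// Sketch of a DID-document wrapper over existing PeerIDs.
package did

import "encoding/json"

type VerificationMethod struct {
	ID                 string `json:"id"`
	Type               string `json:"type"` // e.g. "Ed25519VerificationKey2020"
	Controller         string `json:"controller"`
	PublicKeyMultibase string `json:"publicKeyMultibase"`
}

type Document struct {
	Context            string               `json:"@context"`
	ID                 string               `json:"id"`
	VerificationMethod []VerificationMethod `json:"verificationMethod"`
	Authentication     []string             `json:"authentication"`
}

// WrapPeerID publishes an existing self-certifying identity as a DID
// document that can be stored in the DHT alongside the PeerRecord.
func WrapPeerID(peerID, pubKeyMultibase string) ([]byte, error) {
	id := "did:garden:" + peerID // hypothetical method name
	doc := Document{
		Context: "https://www.w3.org/ns/did/v1",
		ID:      id,
		VerificationMethod: []VerificationMethod{{
			ID:                 id + "#key-1",
			Type:               "Ed25519VerificationKey2020",
			Controller:         id,
			PublicKeyMultibase: pubKeyMultibase,
		}},
		Authentication: []string{id + "#key-1"},
	}
	return json.MarshalIndent(doc, "", "  ")
}
```
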
---

## G8 and G9 — Strong Convergence Points

### G8 — Multidimensional Scoring ✅

The reference system implements 7+ scoring dimensions aligned with the proposal:

- Uptime ratio (gap-aware: short absences are not penalized)
- Normalized latency (probe RTT)
- Challenge accuracy (bandwidth + identity challenges)
- Network diversity (/24 subnet diversity)
- Fill rate
- Witness consistency (witness queries)
- Age-dependent dynamic eviction threshold (20% → 80% over 24 h); a sketch follows

**Assessment**: this is the reference system's most mature and most original contribution. The alignment with the proposal is substantial and provides a solid foundation.
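
As an illustration of the age-ramped eviction threshold from the last item, together with a weighted composite over the dimensions above. The weights are placeholders, not the reference system's tuning:

```go
// Sketch of the age-dependent eviction bar and a composite score.
package scoring

import "time"

// EvictionThreshold returns the minimum composite score (0-100) a peer of
// the given age must maintain to stay in the pool: 20% at birth, ramping
// linearly to 80% over the first 24 hours.
func EvictionThreshold(age time.Duration) float64 {
	const (
		start  = 20.0 // newcomers get the benefit of the doubt
		end    = 80.0 // mature peers are held to the strict bar
		window = 24 * time.Hour
	)
	if age >= window {
		return end
	}
	frac := float64(age) / float64(window)
	return start + (end-start)*frac
}

// Composite combines per-dimension scores (each 0-100) with fixed weights
// summing to 1.0; the weights here are illustrative placeholders.
func Composite(uptime, latency, challenges, diversity, fill, witness float64) float64 {
	return 0.30*uptime + 0.15*latency + 0.20*challenges +
		0.10*diversity + 0.10*fill + 0.15*witness
}
```
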
### G9 — Gossip / SWIM Membership Events ✅

The reference system implements infection-style epidemic propagation for membership events:

- Suspect states (SuspectedAt) with numbered incarnations
- Automatic refutation (a peer defending itself increments its incarnation)
- Decrementing HopsLeft to bound the propagation of stale events
- Deduplication by (PeerID, Incarnation)

**Assessment**: strong convergence with the SWIM principles [Das et al., 2002]; the merge and refutation rules are sketched below. The implementation is a major asset against false positives in failure detection.
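
A sketch of those rules, following [Das et al., 2002]; field names are illustrative, not the reference system's types:

```go
// Sketch of SWIM-style event merging and refutation.
package swim

type State int

const (
	Alive State = iota
	Suspect
)

const maxHops = 4 // illustrative propagation budget

type MemberEvent struct {
	PeerID      string
	Incarnation uint64
	State       State
	HopsLeft    int
}

// Merge applies the SWIM precedence rules: a higher incarnation always
// wins; at equal incarnation, Suspect overrides Alive.
func Merge(current, incoming MemberEvent) MemberEvent {
	if incoming.Incarnation > current.Incarnation {
		return incoming
	}
	if incoming.Incarnation == current.Incarnation && incoming.State == Suspect {
		return incoming
	}
	return current
}

// Refute is invoked by the suspected peer itself: it bumps its incarnation
// and gossips an Alive event that supersedes the suspicion.
func Refute(self MemberEvent) MemberEvent {
	return MemberEvent{
		PeerID:      self.PeerID,
		Incarnation: self.Incarnation + 1,
		State:       Alive,
		HopsLeft:    maxHops,
	}
}
```
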
---

## Dependency Matrix

```
G1 (blockchain)
├── G5 (Sybil stake) — G5 depends on G1 for the cryptoeconomic variant
└── G6 (ZK attestation) — G6 (options A/B) depends on G1 for the receiving smart contract

G2 (record encryption) — independent, priority P1
G3 (DTN) — independent, priority P1
G4 (level-2 consortiums) — independent of G1 (attestations are signed records, not blockchain state)
G7 (DID) — independent
```

---

## Recommended Convergence Roadmap

### Phase 0 — Foundations (immediate, low effort)

| Action | Gap | Estimated duration |
|---|---|---|
| Optional DHT payload encryption (key derived from the PSK) | G2 | 2–4 weeks |
| Per-peer connectivity profile (CONTINUOUS/INTERMITTENT/ORBITAL) + adaptive TTLs | G3 (partial) | 3–6 weeks |
| Level-2 attestation mechanism (intermediate trust) | G4 | 4–8 weeks |

### Phase 1 — Intra-Consortium Monetary Settlement (3–6 months)

| Action | Gap | Estimated duration |
|---|---|---|
| Hyperledger Fabric integration | G1 | 3–6 months |
| Basic conditional-payment smart contracts | G1 | Included |
| Discovery-system ↔ Fabric interface (consumption events) | G1 | Included |
| Statistical challenge–response for delivery attestation | G6 (option C) | 4–6 weeks |

### Phase 2 — Inter-Consortium Settlement and Confidentiality (6–12 months)

| Action | Gap | Estimated duration |
|---|---|---|
| Cosmos SDK permissioned zone for inter-consortium interoperability | G1 | 4–8 months |
| Signed heartbeat batches (full DTN) | G3 (full) | 4–8 weeks |
| Proof of Work on the NodeID (lightweight Sybil resistance) | G5 | 2–3 weeks |
| W3C DID wrapper | G7 | 2–4 weeks |

### Phase 3 — Advanced Transactional Confidentiality (12–24 months)

| Action | Gap | Estimated duration |
|---|---|---|
| Secret Network or Oasis for confidential inter-consortium settlement | G1 | 4–8 months |
| TEE attestations for proof of consumption | G6 (option A) | 3–6 months |
| Full DTN store-and-forward (RFC 4838) | G3 (full) | 2–4 months |

---

## Summary

The reference system is a solid foundation for the discovery plane (G8 and G9 converge). The multidimensional behavioral scoring and the SWIM mechanisms are quality contributions, properly aligned with the proposal.

The major structural gap is the absence of the monetary-settlement plane (G1). Without that layer, the system can coordinate resource usage but cannot monetize it, restricting its scope to intra-organizational uses with no automated economic incentive.

The confidentiality gaps (G2, G3) can be addressed in the short term at relatively low effort and are the immediate priorities for deployments in hostile or space contexts.

The recommended roadmap prioritizes: immediate confidentiality (G2) → partial DTN tolerance (G3) → intra-consortium monetary settlement (G1, phase 1) → inter-consortium interoperability (G1, phase 2) → advanced transactional confidentiality (G1, phase 3).

---

## References

**[Baumgart & Meinert, 2007]** Baumgart, I., & Meinert, S. (2007). S/Kademlia: A practicable approach towards secure key-based routing. In *Proceedings of ICPADS 2007*, pp. 1–8.

**[Cerf et al., 2007]** Cerf, V., et al. (2007). Delay-Tolerant Networking Architecture. *RFC 4838*, IETF.

**[Costan & Devadas, 2016]** Costan, V., & Devadas, S. (2016). Intel SGX Explained. *IACR Cryptology ePrint Archive*, Report 2016/086.

**[Das et al., 2002]** Das, A., Gupta, I., & Motivala, A. (2002). SWIM: Scalable Weakly-consistent Infection-style Process Group Membership Protocol. In *Proceedings of DSN 2002*, pp. 303–312.

**[Douceur, 2002]** Douceur, J. R. (2002). The Sybil Attack. In *Proceedings of IPTPS 2002*, LNCS 2429, pp. 251–260.

**[Kwon & Buchman, 2016]** Kwon, J., & Buchman, E. (2016). *Cosmos: A Network of Distributed Ledgers*. Whitepaper.

**[Oasis Labs, 2020]** Oasis Labs (2020). *Oasis Network Primer*. https://oasisprotocol.org/primer

**[W3C, 2022]** W3C (2022). *Decentralized Identifiers (DIDs) v1.0*. https://www.w3.org/TR/did-core/

463
docs/PROPOSITION_ARCHITECTURE_MINIMALE.md
Normal file
@@ -0,0 +1,463 @@
# Proposal for a Minimal Architecture for a Sovereign Decentralized Network in an Embedded, Hostile Context

**Category**: Distributed-systems architecture — design proposal

**Application domain**: Embedded systems, space context, DTN networks, hostile environments

**Version**: 1.0 — March 2026

---

## Abstract

This document presents a proposal for a minimal architecture for a decentralized distributed network that meets the requirements of sovereignty, confidentiality, and tolerance to network disruption in hostile environments such as embedded space systems or networks operating in the presence of active adversaries. The target context, designated GARDEN (Generic Architecture for Resilient Decentralized Execution Networks), imposes constraints that make every off-the-shelf solution unsuitable: intermittent connectivity (the DTN paradigm [Fall, 2003; Cerf et al., 2007]), actors with heterogeneous trust levels up to and including state adversaries, severe energy and bandwidth constraints, and a requirement of absolute data sovereignty with respect to external actors.

The proposed architecture rests on a strict separation into three independent planes: the peer-discovery plane, the resource-transaction plane, and the monetary-settlement plane. This separation is not a stylistic choice but a security necessity: each plane has fundamentally different availability, consistency, and confidentiality requirements, and merging them into a single protocol creates couplings that degrade the security guarantees of each.

The proposal covers in detail the seven functional layers of this architecture: encrypted transport (the Noise Protocol Framework [Perrin, 2018]), DHT discovery with multidimensional behavioral scoring, resource-record management with end-to-end encryption, consumption attestation via zero-knowledge proofs (ZK proofs [Groth, 2016]), monetary settlement on a confidential blockchain (with a comparative analysis of ten solutions), and relative-trust consortium governance.

Eight conceptual and technological hard problems are identified and analyzed: identity without a central authority (TOFU), trustless bootstrap, Sybil resistance without cryptoeconomic cost, CAP consistency in an AP network, failure detection without an oracle (FLP), on-chain transaction confidentiality, intermittent transactions in a DTN context, and the oracle problem for proofs of real consumption. For each, the state of the art of available solutions is presented together with its residual limits.

---

## 1. Non-Negotiable Guiding Principles

The following principles are the invariants of the proposed architecture. Any compromise on them structurally degrades the system's security in the target hostile context.

### Principle 1 — Sovereignty by Default

No data may leave an actor's trust perimeter without that actor's explicit consent. The principle applies at every layer: presence records in the DHT, resource transactions, and monetary settlement. Sovereignty is meant both technically (cryptographic control over the data lifecycle) and operationally (the ability to revoke and to expire data in a controlled way).

### Principle 2 — Relative, Continuous (Non-Binary) Trust

The trust model cannot be binary in a hostile network. Each peer is assigned a multidimensional trust score computed from observed behavior (historical availability, challenge accuracy, consistency of advertised data, network diversity). The score evolves over time and dynamically determines which interactions are allowed. The impossibility of distinguishing failure from slowness (FLP [Fischer, Lynch & Paterson, 1985]) implies that this score is probabilistic by nature.

### Principle 3 — Availability First for Discovery, Consistency First for Settlement

The discovery layer deliberately adopts the AP profile of the CAP theorem [Brewer, 2000; Gilbert & Lynch, 2002]: it must remain operational even under partition, at the price of relaxed consistency. The monetary-settlement layer adopts the CP profile: strong consistency is required to prevent double-spending, at the price of temporary unavailability during partitions.

### Principle 4 — Operability in Fully Autonomous Mode

A node must be able to operate — make decisions, manage its local resources, maintain a consistent state — with no network connection for periods of up to several days. The resynchronization protocols run after reconnection must be deterministic, convergent, and able to handle the state conflicts accumulated during the isolation period.

### Principle 5 — Flow Opacity (Metadata Confidentiality)

Content encryption is necessary but not sufficient. The architecture must minimize the information revealed by traffic metadata: exchange frequency, volume, and connection topology. Temporal padding, calibrated dummy traffic, and multi-hop routing must be employed on links exposed to potentially adversarial observers.

### Principle 6 — Attack-Surface Minimization

Each layer of the architecture must expose only the functionality strictly necessary for its role. The discovery layer must not have access to transaction encryption keys; the settlement layer must not know the topology of the discovery network. This isolation limits the impact of a partial compromise.

---

## 2. Layer Separation: A Three-Plane Architecture

The proposed architecture rests on a strict separation into three independent functional planes, motivated by security requirements that would be mutually incompatible if the planes were merged.

### Discovery Plane

**Goal**: allow a node to locate other nodes offering compatible resources, assess their quality, and maintain a robust neighborhood network.

**Requirements**: high availability (AP), partition tolerance, low discovery latency, resistance to routing manipulation. Consistency is not critical: a slightly stale view of the network is acceptable.

**What must NOT appear in this plane**: transaction contents, the real identities of actors, exchanged amounts, or anything from which operational strategies could be inferred.

### Data and Resource Plane

**Goal**: allow actors to describe, offer, discover, and transact resources (compute, storage, data, workflows) in a secure and sovereign manner.

**Requirements**: strong integrity (signed records), content confidentiality (end-to-end encryption), sovereignty over the life cycle (creator-controlled TTL), verifiable consumption attestation without content disclosure.

**What must NOT appear in this plane**: monetary settlement (separation of the application layer from the financial layer), and the routing details of the discovery layer.

### Monetary Settlement Plane

**Goal**: settle the monetary transactions resulting from resource consumption in an automated, confidential, censorship-resistant manner.

**Requirements**: deterministic or near-deterministic finality, confidentiality of amounts and parties, smart contracts for settlement automation, consortium governance, resistance to censorship by adversarial validators.

**Why these three planes must remain independent**:

The discovery layer optimizes for availability and routing performance, properties antagonistic to strong confidentiality. Merging the discovery and data layers would expose resource access patterns to any observer participating in the DHT. The monetary settlement layer requires strong consistency and deterministic finality, properties incompatible with the AP profile of discovery. No single protocol can satisfy these contradictory requirements simultaneously.

Analogy: the telephone network (transport layer) does not run the banking network (settlement layer), even though both carry information tied to economic transactions.

---

## 3. P2P Discovery Layer

### 3.1 Kademlia DHT with a Private Namespace

The Kademlia DHT [Maymounkov & Mazières, 2002] is the most suitable foundation for the decentralized discovery layer. Its XOR-metric routing in O(log n) hops, its robustness to churn, and its availability in many frameworks (libp2p in particular) make it the most mature choice for large-scale decentralized P2P networks.

For a private or semi-private network, the DHT namespace must be isolated from the global public DHT. This isolation is achieved with a distinct network key, preventing cross-discovery with nodes belonging to other networks. S/Kademlia [Baumgart & Meinert, 2007] extends Kademlia with cryptographic constraints on node identifier generation (a lightweight proof of work on the NodeID) and signatures on routing messages, significantly reducing poisoning vectors.

There is no realistic alternative to Kademlia for a decentralized P2P network at this scale: Chord [Stoica et al., 2003] is simpler but less robust to churn; Pastry and Tapestry have similar properties but a smaller implementation ecosystem.

### 3.2 Encrypted Multi-Protocol Transport

The Noise Protocol Framework [Perrin, 2018] provides the secure transport primitive. The Noise_XX pattern gives mutual authentication of the parties through an exchange of public keys, with no centralized PKI infrastructure. The framework's light weight (an implementation of a few hundred lines, with no dependency on an X.509 library) makes it particularly well suited to embedded constraints.

For networks with a high trust level (a closed consortium), a pre-shared key (PSK) can be folded into the Noise handshake (the Noise_XXpsk pattern), adding an authentication layer that prevents a non-member of the consortium from even establishing a connection, drastically reducing the Sybil attack surface.
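As an illustration, the sketch below sets up the initiator side of a Noise_XX handshake in Go. It uses the community library `github.com/flynn/noise`, which is one possible implementation choice rather than one mandated by this proposal; key persistence, transport framing, and the PSK variant are deliberately left out.

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/flynn/noise"
)

func main() {
	// Cipher suite: X25519 key agreement, ChaCha20-Poly1305 AEAD, BLAKE2s hash.
	cs := noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashBLAKE2s)

	// Long-term static key pair; in a real node this would live in the HSM (see §8.4).
	static, err := cs.GenerateKeypair(rand.Reader)
	if err != nil {
		panic(err)
	}

	// Noise_XX: both static keys are transmitted (encrypted) during the handshake,
	// so neither party needs prior knowledge of the other's key.
	hs, err := noise.NewHandshakeState(noise.Config{
		CipherSuite:   cs,
		Random:        rand.Reader,
		Pattern:       noise.HandshakeXX,
		Initiator:     true,
		StaticKeypair: static,
	})
	if err != nil {
		panic(err)
	}

	// First handshake message ("-> e"); the two remaining messages and the
	// resulting CipherStates would be driven by the actual transport loop.
	msg1, _, _, err := hs.WriteMessage(nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("handshake message 1: %d bytes\n", len(msg1))
}
```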
Multi-protocol support (TCP, QUIC, or WebTransport depending on link availability) allows the transport to be adapted to the constraints of the physical link, in particular in DTN environments where TCP can be replaced by bundle-layer protocols [Cerf et al., 2007].

### 3.3 Minimal Bootstrap

The first-contact problem is unavoidable in any decentralized network. The proposal adopts a three-level approach:

- **Hard-coded seeds**: a minimal set of bootstrap nodes whose addresses and public keys ship with the software distribution. These seeds are geographically and organizationally diversified to avoid a single point of failure, a direct analogue of Bitcoin's DNS seeds and Tor's directory authorities.

- **Local DHT**: nodes persist a local table of their last known peers, allowing later reconnections to bootstrap without reaching the seeds.

- **Out-of-band resolution**: for high-security embedded deployments, seed distribution over an out-of-band channel (secure physical media, a dedicated radio channel) can complement the network seeds.

### 3.4 Multidimensional Behavioral Scoring

Behavioral scoring is the primary mechanism for assessing peer quality and reliability. The proposal adopts a seven-dimension formula, each dimension justified by a distinct threat vector:

**Dimension 1 — Uptime ratio (weight: 0.20)**: measures the peer's historical availability, taking scheduled contact windows into account. A peer available 95% of the time within its scheduled contact windows deserves a high rating. This dimension penalizes unstable or ephemeral peers (an anti-Sybil vector through time cost).

**Dimension 2 — Normalized latency (weight: 0.15)**: measures response time to requests, normalized by the latency expected for the link type. This dimension penalizes overloaded or geographically distant peers without treating that as a signal of compromise.

**Dimension 3 — Challenge accuracy (weight: 0.25)**: periodic cryptographic challenges (echoing DHT data, challenges on known content) verify that the peer actually hosts the data it advertises. A peer faking its storage or processing capacity will fail these challenges. This is the dimension most resistant to manipulation, because it requires a correct answer to a question the adversary cannot anticipate.

**Dimension 4 — Network diversity (weight: 0.15)**: measures the diversity of the IP subnets of the peers this node knows, favoring peers well connected to varied network regions. This dimension implicitly penalizes clusters of nodes controlled by a single actor (colluders on the same network).

**Dimension 5 — Inverse fill rate (weight: 0.10)**: in a resource-market context, a peer offering heavily loaded resources is less useful than one offering available resources. This dimension naturally steers new clients toward the least solicited peers, balancing load across the network.

**Dimension 6 — Witness coherence (weight: 0.10)**: observations of a peer by third-party witnesses (other peers that contacted it recently) are correlated with direct observations. A strong inconsistency between testimony and direct observation signals a possible compromise or localized collusion.

**Dimension 7 — DHT reliability (weight: 0.05)**: measures the probability that records stored by this peer are returned correctly to later queries. This dimension detects peers that accept records without storing them (parasitic behavior) or that tamper with the records entrusted to them.

**Global score**:

```
Score = Σ(weight_i × dimension_i) × 100 ∈ [0, 100]
```
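A minimal sketch of this aggregation in Go, assuming each dimension has already been normalized to [0, 1] by its own estimator (1 = best); the type and function names are illustrative, not part of the proposal:

```go
package score

// Dimensions holds the seven normalized behavioral measurements, each in [0, 1].
type Dimensions struct {
	Uptime, Latency, Challenges, Diversity, FillInverse, Witness, DHT float64
}

// weights mirror §3.4; they sum to 1.0 so the global score stays in [0, 100].
var weights = [7]float64{0.20, 0.15, 0.25, 0.15, 0.10, 0.10, 0.05}

// Global aggregates the weighted dimensions into a scalar score in [0, 100].
func Global(d Dimensions) float64 {
	dims := [7]float64{d.Uptime, d.Latency, d.Challenges, d.Diversity, d.FillInverse, d.Witness, d.DHT}
	s := 0.0
	for i, v := range dims {
		s += weights[i] * v
	}
	return s * 100
}
```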
### 3.5 Age-Dependent Dynamic Eviction Threshold

New peers do not yet have enough behavioral history to be evaluated fairly. A fixed eviction threshold would systematically penalize recent nodes, freezing the network in favor of its older members. The proposal adopts a threshold that depends on the peer's age:

```
Threshold_min(age) = min(Threshold_max, Threshold_base + Threshold_slope × age_in_days)
```

Typically: `Threshold_min(age) = min(80, 20 + 60 × age/24h)`, starting at 20 (very permissive) and reaching 80 after 24 hours of continuous presence. This profile lets a legitimate new peer establish itself progressively while quickly evicting manifestly failing or malicious peers.
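The same ramp as a small Go helper, a sketch using the constants quoted above (base 20, slope 60 per day, cap 80), which a deployment would tune:

```go
package score

import "time"

const (
	thresholdBase  = 20.0 // floor applied to brand-new peers
	thresholdSlope = 60.0 // added per day of continuous presence
	thresholdMax   = 80.0 // ceiling, reached after 24 h
)

// MinThreshold returns the eviction threshold for a peer of the given age:
// min(80, 20 + 60 × age/24h), per §3.5. For example, MinThreshold(12 * time.Hour)
// yields 50.
func MinThreshold(age time.Duration) float64 {
	t := thresholdBase + thresholdSlope*age.Hours()/24
	if t > thresholdMax {
		return thresholdMax
	}
	return t
}
```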
### 3.6 Protection Against Total Isolation

A critical security invariant: **a node's last active peer can never be evicted on the basis of an insufficient score alone**. If a node has only one active peer left, keeping that connection, even to a peer with a mediocre score, is preferable to complete isolation, which would leave the node blind and mute. Evicting the last peer may only be triggered by positive proof of malicious behavior (a failed challenge, an invalid signature), never by a score dropping below a threshold.

### 3.7 Multi-Channel Verification

Verifying a peer's identity and quality over a single channel creates an attack surface exploitable by an adversary able to perfectly simulate the expected behavior on that channel. The proposal adopts a verification architecture with three orthogonal channels:

- **Identity challenge**: cryptographic verification that the advertised public key actually belongs to the contacted peer.

- **DHT challenge**: verification that the peer really stores the records it claims to host, via queries on keys whose values are known to the observer.

- **Witness query**: asking third-party peers about their recent experience with the peer under evaluation.

Remaining simultaneously consistent across these three orthogonal channels requires a level of adversarial sophistication that grows exponentially with the number of channels checked, making coordinated deception prohibitively expensive.

### 3.8 GossipSub for Event Propagation

Membership events (arrival, departure, significant score updates) are propagated by a gossipsub mechanism: structured, infection-style epidemic dissemination [Das et al., 2002]. This mechanism converges in O(log n) rounds with high probability and requires no central coordination; epidemic propagation is robust to churn and tolerates the temporary absence of nodes.

For hostile environments, gossip can be restricted to peer pairs sharing an encryption key (intra-consortium), preventing topology information from leaking to outside observers.

### 3.9 DTN/Space Adaptation

The following adaptations are indispensable in a DTN context:

- **Store-and-forward**: discovery messages that cannot be delivered (peer unreachable) are stored locally and retransmitted during the next contact windows, following the RFC 4838 model [Cerf et al., 2007].

- **Adaptive TTLs**: DHT record TTLs are set according to the foreseeable propagation delay in the network. For a LEO satellite with a 15-minute contact window every 90 minutes, TTLs must be at least 90 minutes to survive a full orbital cycle (see the sketch after this list).

- **Asynchronous heartbeats**: absent continuous connectivity, heartbeats are accumulated locally and emitted in batches during contact windows, with signed timestamps allowing the uptime history to be reconstructed.

- **State snapshots**: the full state of the neighborhood network is periodically serialized to local storage, allowing lossless recovery after a prolonged disconnection.
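A sketch of the adaptive-TTL rule in Go, assuming the node knows its predicted contact schedule; the safety margin is an illustrative parameter, not a value fixed by the proposal:

```go
package dtn

import "time"

// RecordTTL derives a DHT record TTL from the longest predicted gap between
// contact windows, padded by a safety margin so that a record published just
// after a window closes still survives until the next one (§3.9).
// For the LEO example above, RecordTTL([]time.Duration{90 * time.Minute},
// 15*time.Minute) yields 105 minutes.
func RecordTTL(contactGaps []time.Duration, margin time.Duration) time.Duration {
	longest := time.Duration(0)
	for _, g := range contactGaps {
		if g > longest {
			longest = g
		}
	}
	return longest + margin
}
```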
---

## 4. Data and Resource Layer

### 4.1 Signed Records with Short Expiration

Every record in the DHT carries its creator's signature (an ECDSA or Ed25519 private key) and an expiration timestamp. Short expiration (a TTL typically between 5 minutes and a few hours depending on the resource type) guarantees that stale records invalidate themselves without any explicit deletion mechanism. The creator is responsible for periodically renewing its active records.

In a DTN context, TTLs are adapted to the foreseeable connectivity profile: a satellite node that leaves ground coverage for 2 hours must use TTLs of at least 2 hours for its critical records.
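A minimal Go sketch of such a record, using the standard library's Ed25519; the field layout is illustrative, and a production encoding would use a canonical serialization (CBOR, Protocol Buffers) rather than this ad-hoc concatenation:

```go
package records

import (
	"crypto/ed25519"
	"encoding/binary"
	"time"
)

// Record is a DHT entry whose authenticity and freshness are self-contained.
type Record struct {
	Key       []byte
	Payload   []byte // possibly an end-to-end-encrypted envelope (§4.4)
	Expires   time.Time
	Creator   ed25519.PublicKey
	Signature []byte
}

// signingBytes is the byte string covered by the signature: key, payload, expiry.
func (r *Record) signingBytes() []byte {
	buf := make([]byte, 0, len(r.Key)+len(r.Payload)+8)
	buf = append(buf, r.Key...)
	buf = append(buf, r.Payload...)
	buf = binary.BigEndian.AppendUint64(buf, uint64(r.Expires.Unix()))
	return buf
}

// Sign stamps the record with the creator's Ed25519 signature.
func (r *Record) Sign(priv ed25519.PrivateKey) {
	r.Signature = ed25519.Sign(priv, r.signingBytes())
}

// Valid reports whether the record is authentic and not yet expired at time now.
func (r *Record) Valid(now time.Time) bool {
	return now.Before(r.Expires) && ed25519.Verify(r.Creator, r.signingBytes(), r.Signature)
}
```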
### 4.2 Signed Tombstones for Clean Revocation

Explicitly deleting a record before its natural expiration is done with a signed tombstone: a special record carrying the key of the revoked record, the revocation timestamp, and the creator's signature. Tombstones are propagated by gossip and stored temporarily in the DHT to prevent the revoked record from reappearing (a replay attack).

A tombstone's lifetime must exceed the maximum lifetime of the record it revokes, so that nodes that have not yet seen the revocation cannot re-accept a replay of the original record.
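Continuing the sketch from §4.1 (same illustrative package and conventions), a tombstone can reuse the record's signing discipline; the TTL rule above shows up as the `Expires` check:

```go
package records

import (
	"crypto/ed25519"
	"encoding/binary"
	"time"
)

// Tombstone revokes a record before its natural expiration. Its own expiry
// must be later than the maximum expiry of any record it could revoke, so
// replays of the original record are rejected everywhere (§4.2).
type Tombstone struct {
	RevokedKey []byte
	RevokedAt  time.Time
	Expires    time.Time
	Creator    ed25519.PublicKey
	Signature  []byte
}

func (t *Tombstone) signingBytes() []byte {
	buf := append([]byte{}, t.RevokedKey...)
	buf = binary.BigEndian.AppendUint64(buf, uint64(t.RevokedAt.Unix()))
	buf = binary.BigEndian.AppendUint64(buf, uint64(t.Expires.Unix()))
	return buf
}

// Covers reports whether this still-live tombstone suppresses the given
// record: same key, same creator, and a valid signature.
func (t *Tombstone) Covers(r *Record, now time.Time) bool {
	return now.Before(t.Expires) &&
		string(t.RevokedKey) == string(r.Key) &&
		t.Creator.Equal(r.Creator) &&
		ed25519.Verify(t.Creator, t.signingBytes(), t.Signature)
}
```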
### 4.3 Secondary Indexes in the DHT

To allow resources to be discovered along several criteria (resource type, geographic zone, minimum required trust level), secondary indexes are maintained in the DHT under keys derived from the indexing attributes. A primary record describing a compute resource is, for example, indexed under `hash("compute")`, `hash("zone:LEO-1")`, and `hash("trust:consortium-A")`.
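A sketch of that key derivation in Go, with SHA-256 as the illustrative hash; the attribute vocabulary is the example's, not a fixed schema:

```go
package index

import "crypto/sha256"

// IndexKey maps an indexing attribute such as "compute" or "zone:LEO-1" to
// the DHT key under which matching primary records are referenced (§4.3).
func IndexKey(attribute string) [32]byte {
	return sha256.Sum256([]byte(attribute))
}

// IndexKeys derives every secondary key for one resource record, e.g.
// IndexKeys("compute", "zone:LEO-1", "trust:consortium-A").
func IndexKeys(attributes ...string) [][32]byte {
	keys := make([][32]byte, 0, len(attributes))
	for _, a := range attributes {
		keys = append(keys, IndexKey(a))
	}
	return keys
}
```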
### 4.4 End-to-End Content Encryption

For sensitive data, the DHT must store only encrypted envelopes. The record payload is encrypted with a symmetric key known only to the authorized parties (a consortium key or a key derived from a Diffie-Hellman exchange). The DHT acts as blind storage, unable to infer the content of the records it hosts.

This principle guarantees that even an adversary controlling a region of the DHT cannot access record contents, only their metadata (DHT key, creator, expiration).

### 4.5 Consumption Attestation via Zero-Knowledge Proofs

The oracle problem (proving that a resource was actually consumed without revealing its content or the initiator's identity) is addressed with zero-knowledge proofs. Goldwasser, Micali, and Rackoff [Goldwasser et al., 1989] formalized the paradigm; modern constructions based on non-interactive zero-knowledge arguments (zk-SNARKs [Groth, 2016]) make it possible to prove the correct execution of a computation with O(1) verification, whatever the computation's complexity.

In a resource market, a ZK attestation lets a consumer prove to a smart contract that it received and verified a computation result conforming to its specification, without revealing the result itself or the input parameters.

### 4.6 Pub/Sub for Application Events

Application events (resource availability, workflow completion, capacity alerts) are propagated through a pub/sub mechanism independent of the discovery layer. This mechanism must be lightweight, tolerant of temporary disconnections (messages accumulate during offline periods), and must not expose the topology of the subscription network.

---

## 5. Monetary Settlement and Blockchain Layer

### 5.1 Transactional Positioning

The blockchain is neither the discovery layer nor the data layer. It intervenes exclusively for the monetary settlement of resource transactions, after those resources have been consumed and attested. This positioning is fundamental: conflating the blockchain with the discovery or data infrastructure would create couplings that degrade both performance (blockchain latency is incompatible with real-time discovery) and confidentiality (anything on the blockchain is potentially public).

The blockchain settles transactions AFTER their execution, on the basis of cryptographic attestations of consumption. It does not need to know the content of the exchanged resources, nor the parties' identities beyond their blockchain addresses (which can be pseudonymous or encrypted depending on the chosen solution).
### 5.2 Non-Negotiable Requirements for an Eligible Blockchain

The following requirements derive from the constraints of the target context and serve as exclusion criteria for unsuitable solutions:

| Requirement | Rationale |
|---|---|
| Transaction confidentiality (amounts and parties) | An external observer must be able to infer neither trade volumes nor partnerships between actors |
| Sovereignty (no leakage to external observers) | Even blockchain metadata must not reveal strategic information |
| Fast finality (< 30 s ideally, < 5 min acceptable) | Compatibility with limited DTN contact windows |
| Smart contracts for settlement automation | Automated payment conditional on the consumption attestation [Szabo, 1997; Wood, 2014] |
| Censorship resistance | An adversarial validator must not be able to unilaterally block a transaction |
| Consortium governability | Admission, revocation, and rotation of validators without restarting the network |
| Low energy footprint | A hard constraint for embedded nodes (Proof of Work excluded) |
### 5.3 Analysis of Eligible Blockchains

| Blockchain | Confidentiality | Finality | Smart Contracts | Consortium | Energy | Maturity | GARDEN Fit |
|---|---|---|---|---|---|---|---|
| Ethereum (L1) | ✗ (full transparency) | △ (12 s, probabilistic) | ✓✓ (EVM, very mature) | ✗ (public) | ✗ (PoS OK, but heavy L1) | ✓✓ | Unsuitable alone |
| Ethereum L2 ZK (Aztec, zkSync) | ✓✓ (ZK-rollup) | △ (fast L2, slow L1 proof) | ✓ (in development) | △ | △ | △ (young) | Promising, still immature |
| Aztec Network | ✓✓ (native encrypted ZK) | △ | ✓ (Noir language) | △ | ✓ | △ (in active development) | Very interesting for confidentiality, insufficient maturity in 2026 |
| Secret Network | ✓✓ (encrypted TEEs) | ✓ (Tendermint, ~6 s) | ✓✓ (confidential CosmWasm) | ✓ (Cosmos zones) | ✓✓ | ✓ | Good consortium candidate |
| Oasis Network | ✓✓ (TEE + ParaTime) | ✓ (Tendermint) | ✓✓ (confidential EVM) | ✓ | ✓✓ | ✓ | Very well suited to embedded + smart contracts |
| Zcash | ✓✓ (zk-SNARKs, shielded) | △ (PoW, ~75 s) | ✗ (limited Script) | ✗ (public) | ✗ (PoW) | ✓✓ | Excellent for confidential payments, insufficient smart contracts |
| Monero | ✓✓ (RingCT, ring signatures) | △ (PoW, ~2 min) | ✗ | ✗ (public) | ✗ (PoW) | ✓✓ | As Zcash, less suitable |
| Aleo | ✓✓ (native ZK, Leo language) | ✓ (PoS, ~15 s) | ✓ (ZK-native execution) | △ | ✓✓ | △ (recent mainnet) | Very promising, young ecosystem |
| Hyperledger Fabric | ✓✓ (private channels) | ✓✓ (deterministic, Raft/PBFT) | ✓✓ (mature chaincode) | ✓✓ (MSP, channels) | ✓✓ | ✓✓ | Ideal for a closed consortium |
| Cosmos SDK + IBC | △ (zone-dependent) | ✓✓ (Tendermint, ~6 s) | ✓ (CosmWasm) | ✓✓ (sovereign zones) | ✓✓ | ✓✓ | Excellent multi-consortium base |
| Polkadot / Substrate | △ | ✓ (GRANDPA, ~12 s) | ✓ (ink!, EVM via Frontier) | ✓✓ (parachains) | ✓✓ | ✓ | Suited to multi-consortium, complex |

**Detailed analysis of the most relevant blockchains**:

**Hyperledger Fabric** [Androulaki et al., 2018] is a permissioned blockchain designed for enterprise consortiums. Its modular architecture (orderers, peers, MSP) enables fine-grained governance of consortium members. Private channels let a subset of members transact invisibly to the other members. Finality is deterministic (no forks are possible with Raft or PBFT), a critical property in a DTN context where fork reconciliation is difficult. Its main shortcoming is the absence of trustless operation: trust in the MSPs (Membership Service Providers) is required, which suits closed consortiums but makes the solution unsuitable for inter-consortium exchanges between potential adversaries.

**Cosmos SDK + IBC** [Kwon & Buchman, 2016] offers an architecture of sovereign zones interconnected via the IBC (Inter-Blockchain Communication) protocol. Each zone can be configured independently (consensus algorithm, governance, smart contracts), allowing security levels differentiated by context. The Tendermint BFT consensus [Buchman, 2016] provides deterministic finality in about 6 seconds, compatible with reasonable DTN constraints. The lack of native confidentiality is the main shortcoming: IBC transactions are visible in the logs of both zones involved.

**Secret Network** uses Intel SGX TEE (Trusted Execution Environment) enclaves to run smart contracts in an encrypted environment. Inputs, outputs, and contract state are encrypted even from the validators; only the code is public. This property is exceptional for a hostile context. The main limitation is the dependence on Intel SGX, a hardware technology with documented side-channel vulnerabilities [Costan & Devadas, 2016]. Cosmos compatibility provides interoperability with a broad ecosystem.

**Zcash** [Ben-Sasson et al., 2014; Hopwood et al., 2016] uses zk-SNARKs to enable shielded transactions whose amounts and addresses are cryptographically hidden. It is the most mature solution for pure confidential payments, with no hardware dependency (unlike TEEs). Its main limitation is the absence of expressive smart contracts: Zcash's Script language cannot automate the conditional settlement required by a complex resource market.

**Oasis Network** [Oasis Labs, 2020] combines TEEs and blockchain to offer confidential ParaTimes: encrypted execution environments where computations run out of the validators' sight. The confidential EVM (the Sapphire ParaTime) runs Solidity smart contracts with confidential internal state. Compatibility with both the Cosmos ecosystem and the EVM makes it a particularly versatile candidate.
### 5.4 Architectural Recommendation

The proposal adopts a two-level hybrid architecture:

**Level 1 — Intra-consortium**: Hyperledger Fabric or a permissioned Cosmos zone. Governance is handled by the consortium through MSPs (Fabric) or on-chain governance (Cosmos). Intra-consortium transactions benefit from private channels (Fabric) or application-level confidentiality. Deterministic finality guarantees the absence of forks. This level handles 95%+ of transactions in a normal operational deployment.

**Level 2 — Inter-consortium and public settlement**: Secret Network or Oasis Network for transactions between actors of distinct consortiums, benefiting from confidential smart contracts. Resource-consumption attestations are submitted on-chain as off-chain-generated ZK-proofs, proving correct execution without revealing the content of the computations [Groth, 2016].

**Governance**: each consortium maintains a multi-sig governance structure with a configurable quorum threshold. Member admission and exclusion decisions require a configurable quorum (typically 2/3) to resist minority-corruption attacks.
---

## 6. Conceptual and Technological Hard Problems

### Hard Problem 1 — Identity Without a Central Authority (TOFU)

**Problem**: How can it be established that a public key really belongs to the actor it claims to represent, without a central certification authority? The TOFU (Trust On First Use) paradigm accepts the key at first contact and warns when it changes: pragmatic, but vulnerable to interception of the first contact.

**Solutions**: Decentralized identifiers (DIDs), defined by the W3C specification [W3C, 2022], let actors create self-certified identities anchored in a blockchain or a distributed web of trust, with no dependence on a central authority. libp2p identifiers (a PeerID derived from the public key) provide a primitive form of self-certification.

**Residual challenge**: Anchoring a DID in a public blockchain reveals the identity's existence to any observer. In a hostile context, rotating ephemeral identities with out-of-band recognition mechanisms are an alternative, at the price of higher operational complexity.

### Hard Problem 2 — Trustless Bootstrap

**Problem**: First contact with the network requires knowing at least one trusted peer. These seeds form a trust anchor that cannot be eliminated, only diversified and distributed to reduce the risk of partial compromise.

**Solutions**: Seed diversification (geographic, organizational, technological). Seed distribution over an out-of-band channel for high-security environments. A persistent local DHT for reconnections.

**Residual challenge**: No system without hard-coded seeds or a trusted out-of-band channel can bootstrap in a truly trustless way. Seeds are necessarily a minimal trust anchor.

### Hard Problem 3 — Sybil Resistance Without Cryptoeconomic Cost

**Problem**: Douceur [Douceur, 2002] showed that Sybil resistance is impossible without an entry cost or a central authority. For open networks in hostile environments, neither an organizational PSK nor behavioral scoring is a complete solution: the PSK is effective but requires out-of-band coordination for every new entrant; scoring can be gamed over the long term by a patient adversary.

**Solutions**: An organizational PSK for closed networks. Multidimensional behavioral scoring as a secondary line of defense. For open networks, a cryptoeconomic deposit (on-chain stake) as a minimum entry cost.

**Residual challenge**: Fully open, hostile networks without a cryptoeconomic entry cost remain vulnerable to patient, well-funded Sybil attacks.

### Hard Problem 4 — Consistency Without Strong Consensus (CAP)

**Problem**: The CAP theorem [Brewer, 2000; Gilbert & Lynch, 2002] forces a choice between consistency and availability under partition. For the discovery layer, choosing CP would make the system unavailable during the frequent DTN partitions.

**Solution**: A deliberate AP profile for discovery (eventual consistency, availability first) and CP for blockchain settlement (strong consistency, availability conditional on quorum). State conflicts at reunification are resolved by Last-Write-Wins or by vector clocks, depending on the criticality of the records.

**Residual challenge**: Eventual consistency allows temporary windows of inconsistent state that can be exploited by an adversary who controls the moment of reunification.

### Hard Problem 5 — Failure Detection Without an Oracle (FLP)

**Problem**: Fischer, Lynch, and Paterson [Fischer, Lynch & Paterson, 1985] showed that under asynchrony, no deterministic algorithm can distinguish a failed peer from a slow one. All failure detection is therefore probabilistic and subject to false positives.

**Solution**: The SWIM protocol [Das et al., 2002] offers an infection-style approach with suspicion management: a peer suspected of having failed is not immediately excluded but marked suspect, and confirming its failure requires the convergence of several independent observations. This approach suits DTN networks, where failure-detection false positives are frequent.

**Residual challenge**: In very-high-latency DTN networks, failure confirmation can take several orbital cycles, during which a failed (or malicious) peer remains active in neighborhood tables.

### Hard Problem 6 — Transaction Confidentiality on a Public Chain

**Problem**: The transparency of public blockchains is structurally antagonistic to operational confidentiality. Even pseudonymous blockchains allow partial reconstruction of transaction graphs [Meiklejohn et al., 2013].

**Solutions**: zk-SNARKs [Ben-Sasson et al., 2014] to cryptographically hide amounts and addresses. TEEs (Trusted Execution Environments) for confidential smart-contract execution [Oasis Labs, 2020]. Permissioned blockchains with private channels [Androulaki et al., 2018].

**Residual challenge**: ZK solutions are still maturing for complex smart contracts. TEE solutions carry hardware dependencies with documented side-channel vulnerabilities. Permissioned blockchains require trust in the network's operators.

### Hard Problem 7 — DTN and Intermittent Transactions

**Problem**: Classical blockchains assume continuous connectivity between validators. In a DTN network, a validator can be absent for several hours, preventing quorum and therefore blocking transaction processing.

**Solutions**:

- **State channels**: two parties pre-commit funds into an off-chain channel and exchange signatures off-chain over an extended period, submitting only the final outcome on-chain when the channel closes. This approach tolerates arbitrarily long disconnections.

- **Optimistic rollups**: transactions are submitted on-chain with a challenge window. Absent a challenge, the transaction is finalized. Suited to low-fraud environments with intermittent connectivity.

- **Deferred signing**: transactions are signed locally with a timestamp, accumulated while offline, and submitted in batches at the next connectivity window. The smart contract checks the temporal validity of the signatures.

**Residual challenge**: State channels require an initial connection to commit funds. Optimistic rollups introduce challenge windows of several hours, DTN-compatible but increasing the effective time to finality.

### Hard Problem 8 — Oracles and Proof of Real Consumption

**Problem**: Szabo [Szabo, 1997] identified the fundamental difficulty of anchoring contracts to real-world events. A resource-settlement smart contract must verify that a computation really ran and that data was really delivered, yet a smart contract executes in a blockchain environment isolated from the outside world.

**Solutions**:

- **TEE attestations**: a computation executed in an Intel SGX or ARM TrustZone enclave produces a cryptographic attestation signed by the processor itself, proving that the specified code ran in a secure environment.

- **ZK proofs of execution**: a ZKP proves the correct execution of a program on given inputs without revealing inputs or outputs. Systems such as zkEVMs and experimental zkVMs enable this approach for arbitrary programs [Groth, 2016].

- **Challenge-response**: the consumer submits an interactive proof (random sampling of verifiable results) that the provider can only produce by having actually run the computation.

**Residual challenge**: TEEs have side-channel vulnerabilities [Costan & Devadas, 2016]. Generic ZK proofs of execution remain expensive to generate for complex programs.

---

## 7. Consortium Management and Relative Trust

### 7.1 Relative (Non-Binary) Trust Model

The trust granted to a peer is formalized as a multidimensional vector of scores, aggregated into a scalar in [0, 100]:

```
Trust(peer, t) = f(uptime_ratio(t), challenge_precision(t),
                   witness_coherence(t), age(peer),
                   diversity_contribution(t))
```

This score is updated at every interaction and decays gradually in the absence of recent observations (exponential decay with a configurable half-life). A peer that has been absent for a long time is not necessarily malicious; its score degrades to reflect the growing uncertainty about its state.
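A sketch of that decay in Go: exponential with a configurable half-life, so the score halves after each half-life without observations (the function name is illustrative):

```go
package trust

import (
	"math"
	"time"
)

// Decayed returns the trust score after elapsed time with no observations:
// score × 2^(−elapsed/halfLife), the exponential decay of §7.1.
func Decayed(score float64, elapsed, halfLife time.Duration) float64 {
	return score * math.Exp2(-elapsed.Hours()/halfLife.Hours())
}
```

With a 7-day half-life, for example, a peer scored 80 that goes unobserved for two weeks decays to 20.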
A binary model fails precisely because it ignores this temporal uncertainty: a peer that earns "trusted" status keeps it indefinitely, even if its recent behavior is suspect. The continuous model with temporal decay forces permanent re-evaluation.

### 7.2 Three-Level Consortium Architecture

**Level 1 — PSK Consortium**: peers sharing a pre-shared key (PSK) distributed through a secure organizational channel. The PSK is folded into the Noise handshake, guaranteeing that only consortium members can establish connections in this space. A priori trust is high (initial score: 70/100). This level corresponds to peers belonging to the same organization or operational unit.

**Level 2 — Attestation Consortium**: peers from allied organizations holding an attestation signed by a level-1 member. The attestation contains the peer's public key, its validity period, and the resources it is authorized to access. A priori trust is moderate (initial score: 40/100). This level corresponds to coalition partners.

**Level 3 — Open Network**: peers with neither attestation nor PSK. Initial trust is zero (score: 0/100) and can only grow through the accumulation of positive behavioral observations over an extended period. Access to critical resources is restricted until the score exceeds a configurable threshold.

### 7.3 Revocation and Distrust Propagation

A peer's revocation is propagated via a tombstone signed by a level-1 member and disseminated by epidemic gossip [Das et al., 2002]. Every peer receiving the tombstone immediately sets the revoked peer's score to zero and places it on a temporary blacklist. The blacklist is itself signed and distributed with a configurable TTL, allowing eventual rehabilitation after a temporary revocation.

For active-compromise situations (an extracted private key), the emergency revocation protocol is initiated simultaneously by several level-1 members, so that an adversary controlling a single member cannot block the revocation.

### 7.4 Dynamic Consortiums

A new member is admitted without restarting the network: the new attestation is disseminated via gossip, and existing members start accepting connections from the new member as soon as they receive a valid attestation. PSK rotation follows a predefined schedule with a grace window for progressive transition (old and new PSKs accepted simultaneously for a configurable window of 24 to 72 hours).

---

## 8. Indispensable Adaptations for the Space/Embedded Context

### 8.1 DTN-Compatible Protocols

All discovery-layer protocols must be adapted to tolerate round-trip latencies of several minutes and connectivity interruptions of several hours. Standard TCP timeouts (generally under a few minutes) are replaced by configurable delays matched to the predictable orbits. Critical messages are acknowledged, with exponential retransmission.

### 8.2 Minimizing Verbose Exchanges

Verbose protocols (a profusion of handshakes, keepalive messages, and routing-table synchronizations) are replaced by compact binary protocols (Protocol Buffers, CBOR) with differential compression. The frequency of maintenance messages adapts dynamically to the available bandwidth: on a 64 kbps satellite link, the periodic heartbeat is spaced minutes apart, not seconds.
### 8.3 Deferred Signing and Batch Submission

In DTN environments, transactions are signed locally with a timestamp and accumulated in a local persistence queue. During available connectivity windows, they are submitted in batches to the settlement layer. The smart contract verifies that the signing timestamp falls within an acceptable window and that the nonce is unique (replay prevention).
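A sketch of the node-side queue in Go; the settlement-layer client is abstracted behind an interface, and the names and message encoding are illustrative rather than prescribed:

```go
package settlement

import (
	"crypto/ed25519"
	"encoding/binary"
	"time"
)

// PendingTx is a locally signed transaction awaiting a connectivity window.
type PendingTx struct {
	Nonce     uint64
	SignedAt  time.Time
	Payload   []byte
	Signature []byte
}

// Submitter is whatever client reaches the settlement layer when a contact
// window opens; the real implementation depends on the chosen blockchain.
type Submitter interface {
	SubmitBatch(txs []PendingTx) error
}

// Queue accumulates transactions offline and flushes them in one batch.
type Queue struct {
	priv    ed25519.PrivateKey
	nonce   uint64
	pending []PendingTx
}

func NewQueue(priv ed25519.PrivateKey) *Queue { return &Queue{priv: priv} }

// Enqueue signs a payload over (payload, nonce, timestamp) and stores it locally.
func (q *Queue) Enqueue(payload []byte) {
	q.nonce++
	tx := PendingTx{Nonce: q.nonce, SignedAt: time.Now(), Payload: payload}
	msg := append([]byte{}, payload...)
	msg = binary.BigEndian.AppendUint64(msg, tx.Nonce)
	msg = binary.BigEndian.AppendUint64(msg, uint64(tx.SignedAt.Unix()))
	tx.Signature = ed25519.Sign(q.priv, msg)
	q.pending = append(q.pending, tx)
}

// Flush submits all pending transactions during a contact window and clears
// the queue on success; on failure everything stays queued for the next pass.
func (q *Queue) Flush(s Submitter) error {
	if len(q.pending) == 0 {
		return nil
	}
	if err := s.SubmitBatch(q.pending); err != nil {
		return err
	}
	q.pending = nil
	return nil
}
```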
### 8.4 Hardware Security Module

Each node's primary private key is hosted in a hardware security module (HSM) or a secure enclave (ARM TrustZone, RISC-V Keystone), making physical extraction difficult even if the node is captured. Cryptographic operations (signing, decryption) are delegated to the HSM; the private key never leaves the enclave.

### 8.5 Fully Autonomous Mode

Each node must be able to operate in fully autonomous mode (zero network connectivity) for periods of up to several days, maintaining a consistent state of its local resources, serving local requests, and accumulating transactions awaiting settlement. Resynchronization after reconnection is deterministic and complete, resolving state conflicts through a resolution mechanism configured in advance.

---

## References
**[Androulaki et al., 2018]** Androulaki, E., Barger, A., Bortnikov, V., Cachin, C., Christidis, K., De Caro, A., Enyeart, D., Ferris, C., Laventman, G., Manevich, Y., Muralidharan, S., Murthy, C., Nguyen, B., Sethi, M., Singh, G., Smith, K., Sorniotti, A., Stathakopoulou, C., Vukolic, M., Cocco, S. W., & Yellick, J. (2018). Hyperledger Fabric: a distributed operating system for permissioned blockchains. In *Proceedings of the 13th EuroSys Conference*, article 30.

**[Baumgart & Meinert, 2007]** Baumgart, I., & Meinert, S. (2007). S/Kademlia: A practicable approach towards secure key-based routing. In *Proceedings of the 2007 International Conference on Parallel and Distributed Systems (ICPADS)*, pp. 1–8.

**[Ben-Sasson et al., 2014]** Ben-Sasson, E., Chiesa, A., Garman, C., Green, M., Miers, I., Tromer, E., & Virza, M. (2014). Zerocash: Decentralized Anonymous Payments from Bitcoin. In *Proceedings of the 2014 IEEE Symposium on Security and Privacy (S&P)*, pp. 459–474.

**[Brewer, 2000]** Brewer, E. A. (2000). Towards robust distributed systems. In *Proceedings of the 19th Annual ACM Symposium on Principles of Distributed Computing (PODC)*, p. 7.

**[Buchman, 2016]** Buchman, E. (2016). *Tendermint: Byzantine Fault Tolerance in the Age of Blockchains*. M.Sc. Thesis, University of Guelph.

**[Cerf et al., 2007]** Cerf, V., Burleigh, S., Hooke, A., Torgerson, L., Durst, R., Scott, K., Fall, K., & Weiss, H. (2007). Delay-Tolerant Networking Architecture. *RFC 4838*, IETF.

**[Costan & Devadas, 2016]** Costan, V., & Devadas, S. (2016). Intel SGX Explained. *IACR Cryptology ePrint Archive*, Report 2016/086.

**[Daian et al., 2020]** Daian, P., Goldfeder, S., Kell, T., Li, Y., Zhao, X., Bentov, I., Breidenbach, L., & Juels, A. (2020). Flash Boys 2.0: Frontrunning in Decentralized Exchanges, Miner Extractable Value, and Consensus Instability. In *Proceedings of the 2020 IEEE Symposium on Security and Privacy (S&P)*, pp. 910–927.

**[Das et al., 2002]** Das, A., Gupta, I., & Motivala, A. (2002). SWIM: Scalable Weakly-consistent Infection-style Process Group Membership Protocol. In *Proceedings of the 2002 International Conference on Dependable Systems and Networks (DSN)*, pp. 303–312.

**[Douceur, 2002]** Douceur, J. R. (2002). The Sybil Attack. In *Proceedings of the 1st International Workshop on Peer-to-Peer Systems (IPTPS)*, LNCS 2429, pp. 251–260.

**[Fall, 2003]** Fall, K. (2003). A delay-tolerant network architecture for challenged internets. In *Proceedings of SIGCOMM 2003*, pp. 27–34.

**[Fischer, Lynch & Paterson, 1985]** Fischer, M. J., Lynch, N. A., & Paterson, M. S. (1985). Impossibility of distributed consensus with one faulty process. *Journal of the ACM (JACM)*, 32(2), 374–382.

**[Gilbert & Lynch, 2002]** Gilbert, S., & Lynch, N. (2002). Brewer's conjecture and the feasibility of consistent, available, partition-tolerant web services. *ACM SIGACT News*, 33(2), 51–59.

**[Goldwasser, Micali & Rackoff, 1989]** Goldwasser, S., Micali, S., & Rackoff, C. (1989). The knowledge complexity of interactive proof systems. *SIAM Journal on Computing*, 18(1), 186–208.

**[Groth, 2016]** Groth, J. (2016). On the size of pairing-based non-interactive arguments. In *Proceedings of the 35th Annual International Conference on the Theory and Applications of Cryptographic Techniques (EUROCRYPT 2016)*, LNCS 9666, pp. 305–326.

**[Hopwood et al., 2016]** Hopwood, D., Bowe, S., Hornby, T., & Wilcox, N. (2016). *Zcash Protocol Specification*. Electric Coin Company. https://zips.z.cash/protocol/protocol.pdf

**[Kwon & Buchman, 2016]** Kwon, J., & Buchman, E. (2016). *Cosmos: A Network of Distributed Ledgers*. Whitepaper. https://v1.cosmos.network/resources/whitepaper

**[Maymounkov & Mazières, 2002]** Maymounkov, P., & Mazières, D. (2002). Kademlia: A Peer-to-Peer Information System Based on the XOR Metric. In *Proceedings of IPTPS 2002*, LNCS 2429, pp. 53–65.

**[Meiklejohn et al., 2013]** Meiklejohn, S., Pomarole, M., Jordan, G., Levchenko, K., McCoy, D., Voelker, G. M., & Savage, S. (2013). A Fistful of Bitcoins: Characterizing Payments Among Men with No Names. In *Proceedings of IMC 2013*, pp. 127–140.

**[Nakamoto, 2008]** Nakamoto, S. (2008). Bitcoin: A Peer-to-Peer Electronic Cash System. https://bitcoin.org/bitcoin.pdf

**[Oasis Labs, 2020]** Oasis Network (2020). *Oasis Network Primer*. Oasis Labs. https://oasisprotocol.org/primer

**[Perrin, 2018]** Perrin, T. (2018). *The Noise Protocol Framework*. https://noiseprotocol.org/noise.pdf

**[Stoica et al., 2003]** Stoica, I., Morris, R., Liben-Nowell, D., Karger, D. R., Kaashoek, M. F., Dabek, F., & Balakrishnan, H. (2003). Chord: A scalable peer-to-peer lookup protocol for internet applications. *IEEE/ACM Transactions on Networking*, 11(1), 17–32.

**[Sun et al., 2017]** Sun, S.-F., Au, M. H., Liu, J. K., & Yuen, T. H. (2017). RingCT 2.0: A Compact Accumulator-Based (Linkable Ring Signature) Protocol for Blockchain Cryptocurrency Monero. In *Proceedings of ESORICS 2017*, LNCS 10493, pp. 456–474.

**[Szabo, 1997]** Szabo, N. (1997). Formalizing and securing relationships on public networks. *First Monday*, 2(9).

**[W3C, 2022]** W3C (2022). *Decentralized Identifiers (DIDs) v1.0*. W3C Recommendation. https://www.w3.org/TR/did-core/

**[Wood, 2014]** Wood, G. (2014). *Ethereum: A Secure Decentralised Generalised Transaction Ledger*. Ethereum Yellow Paper. https://ethereum.github.io/yellowpaper/paper.pdf

**[Wood, 2016]** Wood, G. (2016). *Polkadot: Vision for a Heterogeneous Multi-Chain Framework*. Whitepaper. https://polkadot.network/PolkaDotPaper.pdf
410
docs/RISQUES_DECOUVERTE_TRANSACTIONS_DECENTRALISEES.md
Normal file
@@ -0,0 +1,410 @@
# Risk Analysis of Trustless Peer Discovery and of Data and Monetary Transactions in a Decentralized Network

**Category**: Distributed systems security — Risk analysis
**Application domain**: Embedded systems, space context, disruption-tolerant networks (DTN)
**Version**: 1.0 — March 2026

---

## Abstract

Decentralized peer-to-peer networks operating in hostile environments pose security challenges fundamentally different from those of conventional infrastructure. This document analyzes the risks inherent in two interdependent problems: peer discovery in the absence of a central trusted authority, and the conduct of transactions over data and monetary assets in a network that may be infiltrated, surveilled, or partially controlled by adversaries.

The scope of this analysis is bounded by the context of embedded and space distributed systems operating under the DTN (Delay/Disruption Tolerant Networking) paradigm, characterized by high latencies, intermittent connectivity, and the co-presence of actors with heterogeneous trust levels.

The threat model distinguishes two classes of adversaries: the rational, resource-bounded adversary, whose actions are driven by cost/benefit analysis, and the adversary with quasi-unlimited resources, capable of prolonged passive attacks (watchers) or active ones (injection, manipulation). In the latter case, the probabilistic guarantees offered by standard cryptographic protocols can be insufficient over long time horizons.

The analysis covers three distinct layers: the network and transport layer, the P2P discovery layer, and the transaction layer (data and monetary). For each, we identify the attack vectors known from the literature, their relative criticality by usage context (private network vs. hostile public network), and the available mitigations together with their residual limits. A tabular synthesis concludes the analysis and helps steer architectural choices toward the most critical risks.

The main conclusion of this study is that trust, in this context, can only be probabilistic and relative (never binary or absolute), and that any architecture ignoring this reality is structurally vulnerable in the long run.

---
## 1. Context and Threat Model

### 1.1 The Fundamental Problem: Trust Without a Central Authority

Coordinating agents without a central trusted authority is one of the fundamental problems of distributed computing. Lamport, Shostak, and Pease [Lamport et al., 1982] formalized it as the "Byzantine generals problem": how can a set of processes reach consensus when some of them may behave in arbitrarily malicious ways? Their fundamental result establishes that reliable consensus is possible if and only if the proportion of faulty processes is strictly below one third of the total.

This theoretical result directly constrains any distributed architecture operating in a hostile environment: even assuming perfect cryptographic mechanisms, the number of compromised or malicious nodes the system can tolerate is bounded by that one-third proportion. Beyond this threshold, no deterministic consensus algorithm can guarantee consistency.

Furthermore, the FLP result [Fischer, Lynch & Paterson, 1985] proves the impossibility of a deterministic consensus algorithm tolerating even a single failure in a purely asynchronous system. This theorem imposes trade-offs between availability, consistency, and fault tolerance that every architecture must assume explicitly, on pain of creating blind spots in its security guarantees.

### 1.2 Embedded, Space, and Hostile Contexts: DTN Specifics

Space communication networks and military or industrial embedded networks share characteristics that set them radically apart from the wired terrestrial networks for which most P2P protocols were designed.

Fall [Fall, 2003] laid the foundations of the DTN (Delay-Tolerant Networking) paradigm, designed for environments where end-to-end paths do not exist permanently, where latencies can stretch from seconds to several hours (interplanetary communications), and where classical TCP/IP-based transport protocols are unusable. RFC 4838 [Cerf et al., 2007] formalizes the DTN architecture within the IETF, introducing the "bundle" layer, which stores messages and retransmits them during contact windows.

In this context, the usual assumptions of P2P protocols collapse:

- The assumption of partial but permanent connectivity disappears;
- Standard TCP timeouts become unusable;
- Periodic heartbeats can no longer be guaranteed;
- Timeout-based failure detection becomes ambiguous (failure, or mere absence of connectivity?).

These constraints are not merely technical: they create windows of opportunity for adversaries able to exploit disconnection periods to inject false state or corrupt records with no possibility of rebuttal.
### 1.3 Modèle d'Adversaire : Rationnel à Ressources Limitées vs Adversaire d'État
|
||||
|
||||
La littérature distingue classiquement deux grandes classes d'adversaires dans les protocoles cryptographiques :
|
||||
|
||||
**L'adversaire rationnel à ressources limitées** est un acteur dont le comportement est guidé par l'optimisation d'un objectif économique. Il attaquera si le bénéfice attendu dépasse le coût de l'attaque. Ce modèle est pertinent pour les attaques opportunistes sur les réseaux P2P publics.
|
||||
|
||||
**L'adversaire à ressources illimitées** peut financer des attaques dont le coût dépasse le bénéfice apparent, dans une perspective stratégique à long terme. Il peut maintenir une présence passive dans le réseau pendant des mois ou des années sans se manifester activement (watchers), accumulant des informations permettant ultérieurement de dé-anonymiser des participants ou de reconstruire des stratégies opérationnelles.
|
||||
|
||||
Dans un contexte spatial ou militaire embarqué, c'est ce second modèle qui prévaut. Les hypothèses de sécurité doivent donc être dimensionnées pour un adversaire capable de :
|
||||
- Capturer physiquement des nœuds et en extraire les secrets ;
|
||||
- Maintenir une écoute passive à long terme sur tous les canaux visibles ;
|
||||
- Contrôler silencieusement des nœuds compromis pendant des semaines (Stealthy Byzantine [Amir et al., 2011]) ;
|
||||
- Analyser les métadonnées de trafic même en présence de chiffrement de contenu.
|
||||
|
||||
### 1.4 Relative Trust: Continuous vs. Binary Model

The binary trust model (trusted peer / untrusted peer) is ill-suited to complex environments where actors can be partially compromised, temporarily unavailable, or simply unreliable without necessarily being malicious. Trust must be modeled as a probabilistic continuum.

The FLP theorem [Fischer, Lynch & Paterson, 1985] establishes that under asynchrony, an observer cannot deterministically distinguish a failed node from a merely slow one. It follows that any trust judgment about a remote peer is necessarily probabilistic: it rests on an accumulation of consistent observations over time, not on a deterministic proof.

This observation motivates the adoption of continuous-score trust models, integrating behavioral dimensions (historical availability, accuracy of responses, consistency of announced data) rather than binary judgments that an adversary can manipulate by playing on observation delays.
### 1.5 Target Security Properties

For this context, the target security properties are the following:

- **Authenticity**: every peer can prove its identity without a trusted third party (self-certified identity).
- **Integrity**: transmitted or stored data cannot be altered without detection.
- **Availability**: the system keeps operating in degraded mode under partial unavailability.
- **Confidentiality**: the content of exchanges is opaque to unauthorized observers.
- **Sovereignty**: data belongs to its creator, who controls its life cycle.
- **Untraceability**: in the most hostile case, the very existence of exchanges must be concealed.

These properties are partially antagonistic. Availability and confidentiality are often in tension: a system that encrypts all its communications may see its availability degraded by cryptographic overhead, and a system that optimizes peer discovery maximizes the visibility of its participants.
---

## 2. Network and Transport Layer Risks

### 2.1 Man-in-the-Middle and Active Attacks

A MitM (Man-in-the-Middle) attack has an active adversary interpose itself between two communicating peers, impersonating each of them to the other. Without mutual authentication of the parties, this attack is trivial on any unprotected P2P network.
The Noise Protocol Framework [Perrin, 2018] offers a suitable answer: it provides cryptographic handshake primitives enabling mutual authentication and the establishment of encrypted channels without a centralized PKI. The Noise_XX pattern achieves mutual authentication of public keys without requiring a certificate authority, as the sketch below illustrates. Its light weight makes it a good candidate under embedded constraints.
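As a concrete illustration, here is a minimal sketch of a Noise_XX handshake, assuming the community Go implementation `github.com/flynn/noise` is available (the pattern itself is library-agnostic). Both sides run in one process; after the third message, each party has derived transport keys and learned the other's static public key, which is exactly what a pinning policy would record.

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/flynn/noise"
)

func main() {
	cs := noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256)

	// Each peer holds a long-lived static keypair: its self-certified identity.
	aliceKey, _ := cs.GenerateKeypair(rand.Reader)
	bobKey, _ := cs.GenerateKeypair(rand.Reader)

	alice, _ := noise.NewHandshakeState(noise.Config{
		CipherSuite: cs, Random: rand.Reader, Pattern: noise.HandshakeXX,
		Initiator: true, StaticKeypair: aliceKey,
	})
	bob, _ := noise.NewHandshakeState(noise.Config{
		CipherSuite: cs, Random: rand.Reader, Pattern: noise.HandshakeXX,
		Initiator: false, StaticKeypair: bobKey,
	})

	// XX message 1 (-> e): ephemeral key only, no identity revealed yet.
	m1, _, _, _ := alice.WriteMessage(nil, nil)
	bob.ReadMessage(nil, m1)

	// XX message 2 (<- e, ee, s, es): the responder authenticates first.
	m2, _, _, _ := bob.WriteMessage(nil, nil)
	alice.ReadMessage(nil, m2)

	// XX message 3 (-> s, se): the initiator authenticates; both sides
	// now share transport CipherStates and know the peer's static key.
	m3, _, _, _ := alice.WriteMessage(nil, nil)
	if _, _, _, err := bob.ReadMessage(nil, m3); err != nil {
		panic(err) // authentication failure: abort, possible MitM
	}

	// The learned static keys are what a TOFU policy would pin.
	fmt.Printf("bob sees alice as %x...\n", bob.PeerStatic()[:8])
	fmt.Printf("alice sees bob as %x...\n", alice.PeerStatic()[:8])
}
```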
The main residual limit is first contact: if a peer does not know its interlocutor's public key in advance, the first exchange is vulnerable to a MitM that substitutes its own key (the TOFU problem: Trust On First Use).
### 2.2 Eclipse Attacks: Isolating a Peer from the Network

An eclipse attack aims to monopolize all inbound and outbound connections of a target peer, so that all information the peer receives from the network flows through adversary-controlled nodes [Heilman et al., 2015]. Once isolated, the peer can be manipulated: the adversary presents it with a falsified view of the network, can censor its outgoing messages, or feed it bogus transactions.

Heilman et al. show that this attack is feasible against Bitcoin with a limited number of adversarial connections, by exploiting the structure of the routing table. For P2P networks in general, resistance to this attack relies on diversifying peer sources (multi-seed bootstrap, random connections in distinct subspaces of the graph).
### 2.3 Network Partitioning and the CAP Problem

The CAP theorem, stated by Brewer [Brewer, 2000] and formalized by Gilbert and Lynch [Gilbert & Lynch, 2002], establishes that a distributed system cannot simultaneously guarantee Consistency, Availability, and Partition tolerance. Facing a partition, every system must choose between consistency and availability.

In a DTN context, network partitions are not exceptional events: they are the operational norm. A peer-discovery system in a space context must deliberately favor availability (AP) over strong consistency: a node must be able to keep operating even when it cannot verify the state of its peers in real time. Strong consistency is reserved for critical operations (settlement of monetary transactions), where it must be ensured by a separate consensus mechanism.
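A minimal sketch of the AP stance follows (hypothetical types, not the project's actual store): each node keeps a local replica it can always read and write, and reconciles replicas deterministically on reconnection, here with a highest-sequence-number-wins rule.

```go
package main

import "fmt"

// Record is a hypothetical signed discovery record; Seq is bumped by
// the record's owner on every update, making merges deterministic.
type Record struct {
	DID  string
	Seq  uint64
	Addr string
}

// Store is always available locally: reads and writes never block on
// the network (the AP choice), at the price of temporary divergence.
type Store map[string]Record

func (s Store) Put(r Record) {
	if cur, ok := s[r.DID]; !ok || r.Seq > cur.Seq {
		s[r.DID] = r
	}
}

// Merge reconciles two replicas after a partition heals; applying it
// in any order converges to the same state (highest Seq wins).
func (s Store) Merge(other Store) {
	for _, r := range other {
		s.Put(r)
	}
}

func main() {
	a, b := Store{}, Store{}
	// Divergent writes during a partition:
	a.Put(Record{DID: "did:peer:1", Seq: 3, Addr: "/ip4/10.0.0.1/tcp/4001"})
	b.Put(Record{DID: "did:peer:1", Seq: 5, Addr: "/ip4/10.0.0.9/tcp/4001"})
	// Reunification:
	a.Merge(b)
	fmt.Println(a["did:peer:1"].Addr) // the Seq=5 write wins on both sides
}
```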
### 2.4 Traffic Analysis by Passive Actors: The Watcher Problem

Content encryption is not enough to protect against a passive adversary able to analyze traffic metadata. Murdoch and Danezis [Murdoch & Danezis, 2005] showed that even on the Tor network, analysis of traffic patterns (timing, volume, frequency) can correlate anonymized flows with their sources with significant accuracy.

In a P2P discovery network, metadata reveals critical information even without any access to content:

- Heartbeat frequency reveals a node's operational cycles;
- Exchange volume with certain peers reveals privileged relationships;
- Connection/disconnection patterns reveal activity schedules;
- The evolution of the number of known peers reveals expansion or withdrawal phases.

Sun et al. [Sun et al., 2015] quantified the effectiveness of such attacks on Bitcoin networks, showing that the sender of a transaction can be identified with high probability from broadcast-pattern analysis alone, without access to the encrypted content.
### 2.5 DTN/Space Specifics: Latencies, Intermittent Connectivity, TCP Unusability

P2P protocols designed for terrestrial networks assume stable TCP connections, latencies from a millisecond to a few hundred milliseconds, and continuous availability of active peers. None of these assumptions holds in a space context:

- Earth-to-LEO (low Earth orbit) communications introduce latencies of 20 to 600 ms depending on elevation;
- Visibility windows between satellites impose discontinuous contact periods;
- Available energy constrains transmission time, forcing sleep modes.

These constraints make standard TCP unusable: its retransmission, flow-control, and timeout mechanisms are sized for sub-second latencies. The bundle-layer protocols defined in RFC 4838 [Cerf et al., 2007] offer an alternative, but their adoption in P2P systems remains embryonic.
---

## 3. Risks of the (Trustless) P2P Discovery Layer

### 3.1 Sybil Attack: Mass Creation of Identities

The Sybil attack, formalized by Douceur [Douceur, 2002], has an adversary create a large number of pseudonymous identities in a P2P network, gaining disproportionate influence over consensus, routing, or reputation mechanisms.

Douceur's fundamental result is that this attack is **unsolvable without an entry cost or a central authority**. Absent a mechanism that makes identity creation expensive (proof of work, proof of stake, cryptoeconomic deposit), or a central authority able to verify identities, no P2P system can guarantee complete Sybil resistance.
In a resource-constrained embedded context, proof-of-work mechanisms are energetically prohibitive. Alternative solutions include organizational pre-shared keys (PSK) for closed networks, and behavioral scoring to penalize new or unreliable peers, without fully solving the problem for open networks.
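A minimal sketch of the closed-network option, assuming the go-libp2p library (whose options this repository's diagrams also reference): a 32-byte pre-shared key fences the swarm, so joining requires the organizational secret rather than being free.

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/libp2p/go-libp2p"
	"github.com/libp2p/go-libp2p/core/crypto"
	"github.com/libp2p/go-libp2p/core/pnet"
)

func main() {
	// Organizational PSK: only nodes provisioned with the same 32-byte
	// secret can complete a connection, which removes the free-identity
	// Sybil surface of an open network.
	psk := make(pnet.PSK, 32)
	if _, err := rand.Read(psk); err != nil {
		panic(err)
	}

	priv, _, err := crypto.GenerateEd25519Key(rand.Reader)
	if err != nil {
		panic(err)
	}

	h, err := libp2p.New(
		libp2p.Identity(priv),      // self-certified identity (PeerID derived from pubkey)
		libp2p.PrivateNetwork(psk), // fenced swarm: Sybil cost = PSK compromise
		libp2p.ListenAddrStrings("/ip4/0.0.0.0/tcp/4001"),
	)
	if err != nil {
		panic(err)
	}
	defer h.Close()
	fmt.Println("private-network peer up:", h.ID())
}
```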
### 3.2 Routing Table Poisoning

The Kademlia DHT [Maymounkov & Mazières, 2002] organizes peers in a XOR address space, enabling resource discovery in O(log n) hops. Its routing table, however, is vulnerable to poisoning: an adversary can insert bogus entries into a target peer's table, redirecting traffic toward malicious nodes.
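For reference, the XOR metric itself fits in a few lines of Go (a self-contained sketch, not the project's code): the distance between two IDs is their bitwise XOR, and the position of the most significant differing bit selects the k-bucket a peer belongs to.

```go
package main

import (
	"fmt"
	"math/bits"
)

// xorDistance is Kademlia's metric: d(a,b) = a XOR b, compared as a
// big-endian 256-bit integer.
func xorDistance(a, b [32]byte) (d [32]byte) {
	for i := range a {
		d[i] = a[i] ^ b[i]
	}
	return d
}

// bucketIndex returns the position of the most significant set bit of
// the distance (255 = farthest, 0 = closest non-equal); a peer is
// stored in the k-bucket matching this index.
func bucketIndex(d [32]byte) int {
	for i, x := range d {
		if x != 0 {
			return 255 - (i*8 + bits.LeadingZeros8(x))
		}
	}
	return -1 // identical IDs
}

func main() {
	var self, peer [32]byte
	self[0] = 0x80 // 1000 0000 ...
	peer[0] = 0x90 // 1001 0000 ...
	d := xorDistance(self, peer)
	fmt.Println(bucketIndex(d)) // 252: first differing bit, counted from bit 0 (LSB)
}
```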
Baumgart and Meinert [Baumgart & Meinert, 2007] proposed S/Kademlia as a hardened extension of Kademlia, adding cryptographic constraints on node-identifier generation and signatures on routing messages. S/Kademlia makes routing-table poisoning significantly harder, but does not eliminate the risk for an adversary with enough nodes.
### 3.3 Eclipse Attack on the DHT

Singh et al. [Singh et al., 2006] demonstrated DHT-specific eclipse attacks: by controlling a subset of nodes strategically positioned in the address space, an adversary can isolate a region of the DHT, intercepting all discovery requests for identifiers falling in that region.

This attack is especially dangerous for systems where the DHT is used not only for discovery but also for data storage: records stored in the eclipsed region can be deleted, falsified, or made inaccessible without immediate detection.

### 3.4 Bootstrap Poisoning: Compromising the Bootstrap Seeds

Every P2P network must solve the first-contact problem: a node joining the network must know at least one existing peer to connect to. Bootstrap seeds therefore constitute a single point of failure, or a priority target for an adversary.

Bitcoin partially solves this via distributed DNS seeds operated by several independent entities. Tor uses directory authorities whose public keys are hard-coded in the client. In a hostile embedded network, these approaches have limits: DNS seeds can be censored, and hard-coded public keys can be compromised if a node is physically captured.

An adversary who controls the bootstrap seeds can steer every new node toward a malicious partition of the network, effectively isolating them from the legitimate one.
### 3.5 Excessive Churn and Network Instability

Rhea et al. [Rhea et al., 2004] studied the impact of churn (frequent node arrivals and departures) on DHT stability. High churn degrades routing performance, fragments routing tables, and can make some regions of the DHT temporarily unreachable.

In a DTN context, churn is not exceptional but structural. Nodes appear and disappear with the contact windows, creating permanent churn. DHT maintenance mechanisms (periodic neighbor liveness checks, k-bucket repopulation) must be adapted so they do not consume prohibitive bandwidth in this setting.

An adversary can exploit churn by mass-injecting ephemeral nodes: these nodes saturate routing tables, displace legitimate nodes, then vanish before scoring mechanisms can penalize them.
### 3.6 Collusion Between Peers and Fake Scoring Consensus

Decentralized reputation systems are vulnerable to collusion attacks: a group of malicious nodes can award each other good scores, recommend each other, and degrade the scores of legitimate nodes through falsified testimony.

This attack is particularly insidious because it is hard to distinguish from legitimate behavior without out-of-band information about the real network topology. Cross-witness mechanisms (an independent third party observes the peer's behavior) reduce the risk but do not eliminate it if the adversary controls enough nodes to cover every observation angle.

### 3.7 Injection of Fake Presence Records

In a hostile context, an adversary can inject fake presence records into the DHT, advertising nonexistent resources or nodes. This pollution of the discovery layer has several harmful effects:

- It wastes the bandwidth of peers trying to contact nodes that do not exist;
- It can drown out the discovery of legitimate resources by saturating responses with fake entries;
- It can serve as bait to identify which nodes are looking for which kinds of resources (active surveillance via honeypot).
---

## 4. Data Transaction Risks

### 4.1 Data Poisoning: Silent Alteration of Records

The Kademlia DHT stores records near the nodes whose identifiers are closest to the record's key. An adversary controlling nodes in a region of the address space can silently alter the records entrusted to it, substituting falsified data for legitimate data.

The standard mitigation is signed records: each record carries its creator's signature, so any alteration can be detected. This protection, however, assumes the creator's public key is known and authentic, which circles back to the public-key distribution problem.
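A minimal sketch of the mitigation with Go's standard library (with a hypothetical record shape): the creator signs the serialized record, and any node holding the creator's authentic public key can detect tampering.

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/json"
	"fmt"
)

// PeerRecord is a hypothetical discovery record; Sig covers the JSON
// encoding of the record with Sig itself left empty.
type PeerRecord struct {
	DID  string `json:"did"`
	Addr string `json:"addr"`
	Sig  []byte `json:"sig,omitempty"`
}

func (r *PeerRecord) signedBytes() []byte {
	cp := *r
	cp.Sig = nil
	b, _ := json.Marshal(cp)
	return b
}

func main() {
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)

	rec := PeerRecord{DID: "did:peer:42", Addr: "/ip4/10.0.0.1/tcp/4001"}
	rec.Sig = ed25519.Sign(priv, rec.signedBytes())

	// A storing node that alters the record breaks the signature:
	fmt.Println(ed25519.Verify(pub, rec.signedBytes(), rec.Sig)) // true
	rec.Addr = "/ip4/6.6.6.6/tcp/4001"                           // poisoning attempt
	fmt.Println(ed25519.Verify(pub, rec.signedBytes(), rec.Sig)) // false
}
```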
### 4.2 Replay Attacks on Signed Records

An adversary can capture a valid signed record and replay it later, even after its author has revoked or replaced it. Without a nonce or a cryptographically verified timestamp, captured records remain valid indefinitely as far as their signature is concerned.

The standard protection combines timestamps and sequence numbers inside the signed payload. In a DTN network where clocks are not necessarily synchronized, however, timestamp verification introduces additional constraints.
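A sketch of the verifier side under those assumptions (hypothetical types): reject anything at or below the highest sequence number already seen for that identity, and use the timestamp only as a coarse freshness bound with a wide tolerance to absorb clock drift.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// Envelope is a hypothetical signed payload; Seq and IssuedAt are part
// of the bytes covered by the signature (verified elsewhere).
type Envelope struct {
	DID      string
	Seq      uint64
	IssuedAt time.Time
}

// ReplayGuard remembers, per identity, the highest sequence number
// accepted so far.
type ReplayGuard struct {
	lastSeq map[string]uint64
	maxSkew time.Duration // wide in DTN: clocks drift, links are slow
}

func (g *ReplayGuard) Accept(e Envelope, now time.Time) error {
	if e.Seq <= g.lastSeq[e.DID] {
		return errors.New("replay: stale sequence number")
	}
	if now.Sub(e.IssuedAt) > g.maxSkew || e.IssuedAt.Sub(now) > g.maxSkew {
		return errors.New("timestamp outside freshness window")
	}
	g.lastSeq[e.DID] = e.Seq
	return nil
}

func main() {
	g := &ReplayGuard{lastSeq: map[string]uint64{}, maxSkew: 6 * time.Hour}
	now := time.Now()
	fresh := Envelope{DID: "did:peer:42", Seq: 7, IssuedAt: now}
	fmt.Println(g.Accept(fresh, now)) // <nil>: accepted
	fmt.Println(g.Accept(fresh, now)) // replay: stale sequence number
}
```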
### 4.3 Malicious Tombstone Attack

A tombstone is a special record signaling the deletion or revocation of a previous record. An adversary holding the private key of a compromised peer can issue fake tombstones to invalidate legitimate records, effectively erasing a node's presence or a resource's availability from the network's point of view.

This attack is particularly serious in long-lived networks where records do not expire quickly: a malicious tombstone can wipe out years of legitimate history.

### 4.4 Data Sovereignty: Controlling the Life Cycle

Who controls the expiration and revocation of a record is a non-trivial question in a decentralized network. In standard DHT architectures, records expire naturally after a fixed TTL, but renewing them is the creator's responsibility. If the creator is temporarily unavailable (DTN context), its records expire, making it invisible to the network.

Conversely, if records are too durable, a compromised or decommissioned peer remains visible in the DHT for an excessive time. The balance between record durability and freshness is a critical parameter whose optimal values depend on the nodes' connectivity profiles.
### 4.5 Inference Through Presence Analysis

Even assuming record contents are encrypted, analyzing presence patterns reveals sensitive information. A node's heartbeat frequency reveals its operational cycle. The simultaneous appearance of several nodes reveals coordination. Correlating appearance and disappearance times with external events can allow operational plans to be reconstructed.

This risk applies even in a fully encrypted network: traffic metadata (who contacts whom, when, how often) is a source of information independent of content.
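One classical counter-measure is to decouple the transmission schedule from actual activity: emit at a constant rate and send an indistinguishable dummy when there is nothing real to say. A minimal sketch follows (the `publish` function is a hypothetical stand-in, not the project's heartbeat code):

```go
package main

import (
	"fmt"
	"time"
)

// publish stands in for the real transmission path; real and dummy
// messages must be padded to the same size so that, once encrypted,
// they are indistinguishable on the wire.
func publish(kind, payload string) {
	fmt.Printf("%s [%s] %s\n", time.Now().Format("15:04:05"), kind, payload)
}

// constantRateSender drains real messages at a fixed cadence and fills
// the silence with cover traffic, so a passive watcher sees the same
// pattern whether or not the node is active.
func constantRateSender(real <-chan string, interval time.Duration, done <-chan struct{}) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			select {
			case msg := <-real:
				publish("real ", msg)
			default:
				publish("dummy", "cover traffic")
			}
		case <-done:
			return
		}
	}
}

func main() {
	real := make(chan string, 8)
	done := make(chan struct{})
	go constantRateSender(real, 500*time.Millisecond, done)
	real <- "heartbeat"
	time.Sleep(2 * time.Second)
	close(done)
}
```

The residual limit noted in the synthesis table applies: this trades bandwidth (a scarce resource in DTN) for metadata privacy.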
### 4.6 Linkability: Correlating Pseudonymous Identities

In P2P networks using pseudonymous identities (public keys with no attached real identity), correlating successive sessions can link different pseudonymous identities to the same physical actor. If a node systematically connects through the same subsets of peers, that relational structure becomes a distinctive fingerprint.

Linkability is especially problematic in contexts where an actor must interact successively with peers, some of whom may be adversarial.
---

## 5. Risks of Decentralized Monetary Transactions

### 5.1 Double-Spend in an AP Network

Nakamoto [Nakamoto, 2008] proposed the first practical mechanism to prevent double-spending in a peer-to-peer network without a central authority, relying on a blockchain and proof of work. This mechanism offers probabilistic finality: a transaction becomes increasingly hard to reverse as blocks accumulate above it.

In an AP network (availability prioritized over consistency), two partitioned nodes can each independently validate a transaction spending the same asset, creating a conflict that can only be resolved when the network reunifies. Classical blockchain consensus protocols (PoW, PoS) assume enough connectivity for consensus messages to reach a majority of validators in reasonable time, an assumption violated in a DTN context.

### 5.2 Front-Running and MEV

Daian et al. [Daian et al., 2020] documented the phenomenon of Miner Extractable Value (MEV): transaction validators (miners or stakers) can reorder, insert, or censor transactions within a block to extract maximal value at the expense of ordinary users. In a decentralized resource market, this can take the form of rigged auctions, preemptive seizure of coveted resources, or censorship of competing transactions.

MEV is not an attack in the traditional sense: it is a perfectly legal exploitation of the protocol rules by validators. Preventing it requires transaction-ordering protections (commit-reveal schemes, batched auctions with randomized ordering).
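A commit-reveal scheme fits in a few lines of Go (a sketch of the idea, not a production auction): bidders first publish a hash binding their bid to a secret salt; only after the commit phase closes do they reveal, so a validator cannot reorder based on bid contents it cannot yet read.

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// commit binds a bid to a random salt: H(bid || salt). Until the
// reveal, the validator only ever sees this opaque digest.
func commit(bid uint64, salt [16]byte) [32]byte {
	var buf [24]byte
	binary.BigEndian.PutUint64(buf[:8], bid)
	copy(buf[8:], salt[:])
	return sha256.Sum256(buf[:])
}

// verifyReveal checks that the revealed (bid, salt) pair matches the
// digest published during the commit phase.
func verifyReveal(c [32]byte, bid uint64, salt [16]byte) bool {
	return commit(bid, salt) == c
}

func main() {
	var salt [16]byte
	rand.Read(salt[:])

	// Commit phase: only the digest goes on the wire / on chain.
	c := commit(4200, salt)
	fmt.Printf("committed: %x...\n", c[:8])

	// Reveal phase: bid and salt are disclosed and checked.
	fmt.Println(verifyReveal(c, 4200, salt)) // true
	fmt.Println(verifyReveal(c, 9999, salt)) // false: bids cannot be swapped
}
```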
### 5.3 The Oracle Problem

Szabo [Szabo, 1997] introduced the notion of the smart contract, a self-executing contract whose terms are encoded directly in code. A resource-settlement smart contract must typically trigger once a resource has actually been consumed. But how can a smart contract, executed inside a closed blockchain environment, verify a real-world event (a computation executed, a piece of data delivered)?

This oracle problem is fundamentally unsolved. Existing solutions (decentralized oracles such as Chainlink, TEE attestations) reduce the required trust surface but do not eliminate it.

### 5.4 Smart Contract Bugs and Reentrancy

The 2016 DAO exploit demonstrated the risks of smart contracts spectacularly: a reentrancy bug in the contract allowed an attacker to drain roughly 60 million dollars. Luu et al. [Luu et al., 2016] systematically documented vulnerability classes in Ethereum smart contracts, including reentrancy, timestamp dependence, race conditions, and arithmetic overflows.

In a distributed resource market, smart contracts potentially manage assets of significant value. Formal verification of such contracts, while feasible for simple ones, remains out of reach for complex ones.
### 5.5 Transaction Confidentiality and Blockchain Transparency

The total transparency of public blockchains such as Ethereum or Bitcoin is antagonistic to data sovereignty. Meiklejohn et al. [Meiklejohn et al., 2013] showed that Bitcoin transactions, supposedly pseudonymous, can be linked to real entities with a high success rate through transaction-graph analysis, correlation with external events, and exploitation of address reuse.

In a hostile operational context, the mere visibility of transaction volumes between actors can reveal critical strategic information: who pays whom, how much, how often, and for what kind of resource.

### 5.6 Finality and Latency: Unsuitability for DTN Environments

Proof-of-work blockchains offer probabilistic finality: a transaction with 6 confirmations on Bitcoin is considered practically irreversible, but that process takes about an hour under normal conditions. BFT-consensus blockchains (Tendermint, HotStuff) offer deterministic finality within seconds, but assume sufficient connectivity between validators.

In a DTN network with intermittent connectivity, a validator can be absent for several hours. If its participation is required to reach quorum, transactions are blocked for the entire duration of its absence.
---

## 6. Risks Specific to the Space and Embedded Context

### 6.1 Intermittent Connectivity and the Impossibility of Continuous Heartbeats

Failure detection by periodic heartbeat assumes continuous connectivity between the monitored nodes and their observers. In a DTN context, a node can legitimately be unreachable for hours without being faulty. A naive failure detector based on short timeouts will produce massive false positives, degrading the quality of the neighborhood network.

Distinguishing "node absent (normal)" from "node faulty or malicious" requires knowledge of the predictable contact schedule, information that may itself be sensitive and unpublishable in a hostile network.
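A sketch of a schedule-aware detector under those assumptions (hypothetical types): a peer is only suspected once a contact window in which it should have been reachable has passed without news, rather than after a fixed timeout.

```go
package main

import (
	"fmt"
	"time"
)

// ContactWindow is one predicted opportunity to hear from a peer.
type ContactWindow struct{ Start, End time.Time }

// suspicious reports whether silence from the peer is actually
// anomalous: only if at least one full contact window has elapsed
// since we last heard from it. Outside windows, silence is normal.
func suspicious(lastSeen, now time.Time, plan []ContactWindow, grace time.Duration) bool {
	for _, w := range plan {
		if w.Start.After(lastSeen) && now.After(w.End.Add(grace)) {
			return true // it had a window to speak and stayed silent
		}
	}
	return false
}

func main() {
	now := time.Now()
	lastSeen := now.Add(-10 * time.Hour) // long silence...
	upcoming := []ContactWindow{
		{Start: now.Add(2 * time.Hour), End: now.Add(3 * time.Hour)}, // next pass
	}
	// ...but no contact window has passed yet, so not suspect:
	fmt.Println(suspicious(lastSeen, now, upcoming, 5*time.Minute)) // false

	missed := []ContactWindow{
		{Start: now.Add(-2 * time.Hour), End: now.Add(-1 * time.Hour)},
	}
	fmt.Println(suspicious(lastSeen, now, missed, 5*time.Minute)) // true
}
```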
### 6.2 Limited Bandwidth

Satellite communication links have asymmetric, limited throughput. Chatty P2P protocols (an abundance of keep-alive messages, gossip propagation, routing-table synchronization) can consume a substantial fraction of the available bandwidth, leaving little capacity for application traffic.

Blockchain consensus algorithms (PoW in particular) are especially ill-suited: propagating large blocks over low-bandwidth links can take several minutes, degrading performance and creating conditions favorable to forks.

### 6.3 Energy Constraints

Embedded space nodes operate on strict energy budgets. Proof of Work, which requires continuous energy expenditure proportional to the security it provides, is structurally incompatible with these constraints. Alternative consensus algorithms (Proof of Stake, BFT) have compatible energy profiles but require higher network availability.

### 6.4 Planned Isolation and Degraded Mode

In space operations, periods of planned isolation (eclipse passage, radio-silence maintenance, orbit outside ground coverage) are part of the normal operational profile. The network must be designed to run fully autonomously during these periods, without irreversible degradation of network state.

This constraint requires that all critical state be replicated locally on the node, that operational decisions not require real-time network consensus, and that resynchronization after reconnection be deterministic and complete.
### 6.5 Physical Attack: Secret Extraction

A physically present adversary can capture an embedded node and extract its cryptographic secrets (private keys, PSK, network identifiers). This attack compromises not only the captured node but potentially the whole network if shared keys are not renewed quickly.

Mitigation relies on hardware security modules (HSM, Secure Enclave) that make extraction difficult, regular key-rotation mechanisms, and reactive revocation procedures that quickly invalidate a captured node's keys.

### 6.6 Long-Term Compromise: Stealthy Byzantine

Amir et al. [Amir et al., 2011] documented "stealthy Byzantine attacks": a compromised node can behave normally for a long period to avoid detection, then activate coordinated malicious behavior at the most opportune moment. This attack mode is particularly dangerous because reputation mechanisms based on behavioral history grant high trust to long-standing nodes, which are precisely the preferred targets of this kind of compromise.

The only partial mitigation is periodic consistency checking of behavior along dimensions that are hard to fake (accuracy on cryptographic challenges, consistency of announced data with independent sources), combined with cross-surveillance by multiple witnesses.
---

## 7. Managing Relative Trust and Consortiums

### 7.1 Why Binary Trust Fails in a Hostile Network

A binary trust model assigns each peer a discrete state: "trusted" or "untrusted". This model fails in complex environments for several reasons.

First, a peer can be partially compromised: reliable for some operations but not for others. Second, trust is temporal: a peer trustworthy yesterday may be compromised today. Third, the binary model is vulnerable to fraudulent promotion: an adversary can behave flawlessly for a long period to reach "trusted" status, then exploit it.

A continuous, multidimensional scoring model offers far better resistance by making manipulation both more costly and easier to detect: coherently faking several behavioral dimensions at once requires far more sophistication than mere patience.
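This is the model the repository itself adopts for node-side indexer scoring (see docs/diagrams/03_indexer_heartbeat.puml below). A condensed Go sketch of that weighting, using the same weights as the diagram but simplified, illustrative types:

```go
package main

import (
	"fmt"
	"math"
	"time"
)

// Dimensions aggregates the seven behavioral ratios (each in [0,1])
// observed for one indexer, as in diagram 03.
type Dimensions struct {
	Uptime, ChallengeAccuracy, Latency, Fill,
	FillConsistency, WitnessConsistency, DHTSuccess float64
	BornAtChanges int // identity instability: restarts with a new bornAt
}

// Score mirrors ComputeNodeSideScore: a weighted sum scaled to [0,100],
// then penalized for bornAt instability.
func Score(d Dimensions) float64 {
	s := 0.20*d.Uptime +
		0.20*d.ChallengeAccuracy +
		0.15*d.Latency +
		0.10*d.Fill +
		0.10*d.FillConsistency +
		0.15*d.WitnessConsistency +
		0.10*d.DHTSuccess
	penalty := math.Max(0, 1-0.30*float64(d.BornAtChanges))
	return s * 100 * penalty
}

// MinScore grows with relationship age: young peers are easy to drop,
// long-observed peers need a worse record before eviction.
func MinScore(age time.Duration) float64 {
	m := 20 + 60*(age.Hours()/24)
	return math.Min(math.Max(m, 20), 80)
}

func main() {
	d := Dimensions{
		Uptime: 0.95, ChallengeAccuracy: 0.9, Latency: 0.8, Fill: 0.6,
		FillConsistency: 1.0, WitnessConsistency: 0.9, DHTSuccess: 0.85,
	}
	fmt.Printf("score=%.1f min=%.1f\n", Score(d), MinScore(36*time.Hour))
}
```

Faking such a score requires sustaining uptime, challenge accuracy, latency, and witness consistency simultaneously, which is the point of the multidimensional design.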
### 7.2 Trust on First Use (TOFU) and Its Limits

The TOFU paradigm accepts a peer's public key at first contact and treats it as authentic thereafter. This approach is pragmatic but carries a critical first-contact vulnerability: an adversary able to intercept that first connection can substitute its own key.

SSH uses TOFU coupled with an alert on any later key change. This is reasonable for networks with relatively stable topology, but problematic in networks where nodes are frequently replaced or reinitialized.
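A known-hosts-style pin store fits in a few lines (a sketch with stdlib ed25519 keys and a hypothetical API): the first contact pins the key, and any later mismatch is surfaced as a hard error rather than being silently re-pinned.

```go
package main

import (
	"bytes"
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
)

// PinStore maps a peer identifier to the public key seen on first use.
type PinStore map[string]ed25519.PublicKey

// Check implements TOFU: unknown peers are pinned (the leap of faith),
// known peers must present the exact pinned key.
func (p PinStore) Check(peerID string, key ed25519.PublicKey) error {
	pinned, ok := p[peerID]
	if !ok {
		p[peerID] = key // first contact: trust and remember
		return nil
	}
	if !bytes.Equal(pinned, key) {
		// Like SSH's "REMOTE HOST IDENTIFICATION HAS CHANGED": either
		// the node was legitimately reinstalled, or this is a MitM.
		return fmt.Errorf("key mismatch for %s: refusing connection", peerID)
	}
	return nil
}

func main() {
	pins := PinStore{}
	keyA, _, _ := ed25519.GenerateKey(rand.Reader)
	keyEvil, _, _ := ed25519.GenerateKey(rand.Reader)

	fmt.Println(pins.Check("did:peer:42", keyA))    // <nil>: pinned
	fmt.Println(pins.Check("did:peer:42", keyA))    // <nil>: matches pin
	fmt.Println(pins.Check("did:peer:42", keyEvil)) // key mismatch error
}
```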
### 7.3 Chosen-Trust Consortiums: Governance Models

Consortiums make it possible to explicitly define subgroups of peers that are granted higher a priori trust, based on an out-of-band organizational relationship. In a space or military context, these consortiums typically correspond to coalitions or operational units.

Governing such consortiums raises questions: who can admit new members? Through what mechanism? What quorum is required to exclude a member? These governance decisions can themselves only be made in a decentralized way if an appropriate consensus mechanism exists to support them.
### 7.4 Trust Revocation Without a Central Authority: The Unsolved Problem

Revoking a consortium member without a central authority is a partially open problem. Certificate Revocation Lists (CRL) and OCSP in traditional PKIs assume a central revocation authority. In a decentralized network, revocation must be propagated epidemically, but epidemic propagation can itself be blocked if the adversary controls the communication channels of the nodes aware of the revocation.
### 7.5 Epidemic Propagation of Distrust Signals

Das, Gupta and Motivala [Das et al., 2002] proposed the SWIM protocol (Scalable Weakly-consistent Infection-style Process Group Membership) for distributed failure detection. Its core principle, infection-style propagation, can be extended to distrust signals: a node that has observed suspicious behavior forwards the information to a random subset of its peers, which propagate it in turn.
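The O(log n) convergence is easy to see in simulation (a self-contained sketch, not the SWIM implementation): each round, every already-informed node forwards the signal to `fanout` random peers.

```go
package main

import (
	"fmt"
	"math/rand"
)

// rounds simulates infection-style dissemination: starting from one
// informed node, each round every node informed at the start of the
// round gossips to `fanout` uniformly random peers. It returns the
// number of rounds until everyone is informed.
func rounds(n, fanout int, rng *rand.Rand) int {
	informed := make([]bool, n)
	informed[0] = true
	count, r := 1, 0
	for count < n {
		r++
		snapshot := make([]bool, n)
		copy(snapshot, informed)
		for i := 0; i < n; i++ {
			if !snapshot[i] {
				continue
			}
			for f := 0; f < fanout; f++ {
				j := rng.Intn(n)
				if !informed[j] {
					informed[j] = true
					count++
				}
			}
		}
	}
	return r
}

func main() {
	rng := rand.New(rand.NewSource(1))
	for _, n := range []int{100, 1_000, 10_000} {
		fmt.Printf("n=%5d -> %d rounds\n", n, rounds(n, 2, rng))
	}
	// The round count grows roughly logarithmically with n, as SWIM predicts.
}
```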
This approach converges probabilistically in O(log n) steps, but is vulnerable if the adversary controls a significant fraction of the propagation channels, or can identify and selectively block the distrust messages that concern it.
---

## 8. Risk Synthesis and Classification

The following table summarizes the identified risks, their criticality by context, and the available mitigations:
| Risk | Layer | Criticality (private network) | Criticality (hostile network) | Main mitigation | Residual limit |
|---|---|---|---|---|---|
| Active MitM | Transport | High | Critical | Noise Protocol Framework (mutual auth) | TOFU vulnerability at first contact |
| Eclipse attack | Network | Medium | High | Connection diversification, multi-seeds | Adversary controlling an ISP |
| Network partitioning | Network | Low | High | Deliberate AP choice, relaxed consistency | State forks on reunification |
| Traffic analysis | Transport | Low | Critical | Padding, traffic shaping, mixnets | Prohibitive cost at scale |
| Sybil attack | Discovery | Low (PSK) | High | Organizational PSK + behavioral scoring | Open hostile network unsolved |
| Routing table poisoning | Discovery | Medium | High | S/Kademlia (signed routing messages) | Adversary with fraction > 1/3 |
| Eclipse on DHT | Discovery | Medium | High | Seed distribution, neighbor diversity | Performance/security trade-off |
| Bootstrap poisoning | Discovery | Low | Critical | Distributed multi-seeds, hard-coded keys | Seeds = single trust anchor |
| Adversarial churn | Discovery | Low | Medium | Rate limiting, grace period | Hard to distinguish from normal churn |
| Scoring collusion | Discovery | Low | High | Cross-witnessing, multidimensional challenges | Unsolvable if adversary > 1/3 |
| Data poisoning | Data | High | Critical | Signed records | Public-key distribution |
| Replay attack | Data | Medium | High | Nonce + timestamp in signed payload | Clock synchronization in DTN |
| Malicious tombstone | Data | Medium | High | Signature + revocation timestamp | Private-key compromise |
| Presence inference | Data | Low | Critical | Temporal padding, dummy heartbeats | Bandwidth cost |
| Linkability | Data | Low | High | Identity rotation, padding | Operational difficulty |
| Double-spend | Monetary | High | High | BFT consensus (deterministic finality) | Validator unavailability |
| MEV / front-running | Monetary | Medium | High | Commit-reveal, batch auctions | Imperfect resistance |
| Oracle problem | Monetary | High | High | TEE attestations, ZK proofs | Hardware trust surface |
| Smart contract bug | Monetary | High | High | Formal verification, audits | Complete verification impossible |
| Blockchain transparency | Monetary | Medium | Critical | ZK-SNARKs, TEE confidential contracts | Maturity of (ZK) solutions |
| Stealthy Byzantine | All | Low | High | Multidimensional scoring + witnesses | Detection only a posteriori |
| Physical capture | All | Low | Critical | HSM, regular key rotation | Compromise window before revocation |
---

## References

**[Amir et al., 2011]** Amir, Y., Coan, B., Kirsch, J., & Lane, J. (2011). Prime: Byzantine Replication Under Attack. *IEEE Transactions on Dependable and Secure Computing*, 8(4), 564–577.

**[Baumgart & Meinert, 2007]** Baumgart, I., & Meinert, S. (2007). S/Kademlia: A practicable approach towards secure key-based routing. In *Proceedings of the 2007 International Conference on Parallel and Distributed Systems (ICPADS)*, pp. 1–8.

**[Brewer, 2000]** Brewer, E. A. (2000). Towards robust distributed systems. In *Proceedings of the 19th Annual ACM Symposium on Principles of Distributed Computing (PODC)*, p. 7. (Keynote address).

**[Cerf et al., 2007]** Cerf, V., Burleigh, S., Hooke, A., Torgerson, L., Durst, R., Scott, K., Fall, K., & Weiss, H. (2007). Delay-Tolerant Networking Architecture. *RFC 4838*, IETF.

**[Daian et al., 2020]** Daian, P., Goldfeder, S., Kell, T., Li, Y., Zhao, X., Bentov, I., Breidenbach, L., & Juels, A. (2020). Flash Boys 2.0: Frontrunning in Decentralized Exchanges, Miner Extractable Value, and Consensus Instability. In *Proceedings of the 2020 IEEE Symposium on Security and Privacy (S&P)*, pp. 910–927.

**[Das et al., 2002]** Das, A., Gupta, I., & Motivala, A. (2002). SWIM: Scalable Weakly-consistent Infection-style Process Group Membership Protocol. In *Proceedings of the 2002 International Conference on Dependable Systems and Networks (DSN)*, pp. 303–312.

**[Douceur, 2002]** Douceur, J. R. (2002). The Sybil Attack. In *Proceedings of the 1st International Workshop on Peer-to-Peer Systems (IPTPS)*, LNCS 2429, pp. 251–260.

**[Fall, 2003]** Fall, K. (2003). A delay-tolerant network architecture for challenged internets. In *Proceedings of the 2003 Conference on Applications, Technologies, Architectures, and Protocols for Computer Communications (SIGCOMM)*, pp. 27–34.

**[Fischer, Lynch & Paterson, 1985]** Fischer, M. J., Lynch, N. A., & Paterson, M. S. (1985). Impossibility of distributed consensus with one faulty process. *Journal of the ACM (JACM)*, 32(2), 374–382.

**[Gilbert & Lynch, 2002]** Gilbert, S., & Lynch, N. (2002). Brewer's conjecture and the feasibility of consistent, available, partition-tolerant web services. *ACM SIGACT News*, 33(2), 51–59.

**[Heilman et al., 2015]** Heilman, E., Kendler, A., Zohar, A., & Goldberg, S. (2015). Eclipse Attacks on Bitcoin's Peer-to-Peer Network. In *Proceedings of the 24th USENIX Security Symposium*, pp. 129–144.

**[Lamport, Shostak & Pease, 1982]** Lamport, L., Shostak, R., & Pease, M. (1982). The Byzantine Generals Problem. *ACM Transactions on Programming Languages and Systems (TOPLAS)*, 4(3), 382–401.

**[Luu et al., 2016]** Luu, L., Chu, D.-H., Olickel, H., Saxena, P., & Hobor, A. (2016). Making Smart Contracts Smarter. In *Proceedings of the 2016 ACM SIGSAC Conference on Computer and Communications Security (CCS)*, pp. 254–269.

**[Maymounkov & Mazières, 2002]** Maymounkov, P., & Mazières, D. (2002). Kademlia: A Peer-to-Peer Information System Based on the XOR Metric. In *Proceedings of the 1st International Workshop on Peer-to-Peer Systems (IPTPS)*, LNCS 2429, pp. 53–65.

**[Meiklejohn et al., 2013]** Meiklejohn, S., Pomarole, M., Jordan, G., Levchenko, K., McCoy, D., Voelker, G. M., & Savage, S. (2013). A Fistful of Bitcoins: Characterizing Payments Among Men with No Names. In *Proceedings of the 2013 Internet Measurement Conference (IMC)*, pp. 127–140.

**[Murdoch & Danezis, 2005]** Murdoch, S. J., & Danezis, G. (2005). Low-Cost Traffic Analysis of Tor. In *Proceedings of the 2005 IEEE Symposium on Security and Privacy (S&P)*, pp. 183–195.

**[Nakamoto, 2008]** Nakamoto, S. (2008). Bitcoin: A Peer-to-Peer Electronic Cash System. *Whitepaper*. https://bitcoin.org/bitcoin.pdf

**[Rhea et al., 2004]** Rhea, S., Geels, D., Roscoe, T., & Kubiatowicz, J. (2004). Handling Churn in a DHT. In *Proceedings of the 2004 USENIX Annual Technical Conference*, pp. 127–140.

**[Singh et al., 2006]** Singh, A., Castro, M., Druschel, P., & Rowstron, A. (2006). Defending against Eclipse attacks on overlay networks. In *Proceedings of the 11th Workshop on ACM SIGOPS European Workshop*, article 21.

**[Stoica et al., 2003]** Stoica, I., Morris, R., Liben-Nowell, D., Karger, D. R., Kaashoek, M. F., Dabek, F., & Balakrishnan, H. (2003). Chord: A scalable peer-to-peer lookup protocol for internet applications. *IEEE/ACM Transactions on Networking*, 11(1), 17–32.

**[Sun et al., 2015]** Sun, Y., Edmundson, A., Vanbever, L., Li, O., Rexford, J., Chiang, M., & Mittal, P. (2015). RAPTOR: Routing Attacks on Privacy in Tor. In *Proceedings of the 24th USENIX Security Symposium*, pp. 271–286.

**[Szabo, 1997]** Szabo, N. (1997). Formalizing and securing relationships on public networks. *First Monday*, 2(9).
64
docs/diagrams/01_node_init.puml
Normal file
@@ -0,0 +1,64 @@
@startuml
title Node Initialization — Peer A (InitNode)

participant "main (Peer A)" as MainA
participant "Node A" as NodeA
participant "libp2p (Peer A)" as libp2pA
participant "ConnectionGater A" as GaterA
participant "DB Peer A (oc-lib)" as DBA
participant "NATS A" as NATSA
participant "Indexer (shared)" as IndexerA
participant "DHT A" as DHTA
participant "StreamService A" as StreamA
participant "PubSubService A" as PubSubA

MainA -> NodeA: InitNode(isNode=true, isIndexer=false)

NodeA -> NodeA: LoadKeyFromFilePrivate() → priv
NodeA -> NodeA: LoadPSKFromFile() → psk

NodeA -> GaterA: newOCConnectionGater(nil)
NodeA -> libp2pA: New(\n PrivateNetwork(psk),\n Identity(priv),\n ListenAddr: tcp/4001,\n ConnectionGater(gater)\n)
libp2pA --> NodeA: host A (PeerID_A)
NodeA -> GaterA: gater.host = host A

note over GaterA: InterceptSecured (inbound):\n1. DB lookup by peer_id\n → BLACKLIST : refuse\n → found : accept\n2. Not found → DHT sequential check\n (transport-error fallthrough only)

NodeA -> libp2pA: SetStreamHandler(/opencloud/probe/1.0, HandleBandwidthProbe)
NodeA -> libp2pA: SetStreamHandler(/opencloud/witness/1.0, HandleWitnessQuery)

NodeA -> libp2pA: NewGossipSub(ctx, host) → ps (GossipSub)

NodeA -> NodeA: buildRecord() closure\n→ signs fresh PeerRecord (expiry=now+2min)\n embedded in each heartbeat tick

NodeA -> IndexerA: ConnectToIndexers(host, minIndexer=1, maxIndexer=5, buildRecord)
note over IndexerA: Reads IndexerAddresses from config\nAdds seeds → Indexers Directory (IsSeed=true)\nLaunches SendHeartbeat goroutine (20s ticker)

IndexerA -> DHTA: proactive DHT discovery (after 5s warmup)\ninitNodeDHT(h, seeds)\nDiscoverIndexersFromDHT → SelectByFillRate\n→ add to Indexers Directory + NudgeIt()

NodeA -> NodeA: claimInfo(name, hostname)
NodeA -> IndexerA: TempStream /opencloud/record/publish/1.0
NodeA -> IndexerA: stream.Encode(Signed PeerRecord A)
IndexerA -> DHTA: PutValue("/node/"+DID_A, record)

NodeA -> NodeA: StartGC(30s)

NodeA -> StreamA: InitStream(ctx, host, PeerID_A, 1000, nodeA)
StreamA -> StreamA: SetStreamHandler(resource/search, create, update,\n delete, planner, verify, considers)
StreamA --> NodeA: StreamService A

NodeA -> PubSubA: InitPubSub(ctx, host, ps, nodeA, streamA)
PubSubA -> PubSubA: subscribeEvents(PB_SEARCH, timeout=-1)
PubSubA --> NodeA: PubSubService A

NodeA -> NodeA: SubscribeToSearch(ps, callback)
note over NodeA: callback: if evt.From != self\n → GetPeerRecord(evt.From)\n → StreamService.SendResponse

NodeA -> NATSA: ListenNATS(nodeA)
note over NATSA: Subscribes:\nCREATE_RESOURCE → partner on-demand\nPROPALGATION_EVENT → resource propagation

NodeA --> MainA: *Node A is ready

note over NodeA,IndexerA: SendHeartbeat goroutine (permanent, 20s ticker):\nNode → Indexer : Heartbeat{name, PeerID, indexersBinded, need, challenges?, record}\nIndexer → Node : HeartbeatResponse{fillRate, challenges, suggestions, witnesses, suggestMigrate}\nScore updated (7 dimensions), pool managed autonomously

@enduml
38
docs/diagrams/02_node_claim.puml
Normal file
@@ -0,0 +1,38 @@
@startuml
title Node Claim — Peer A publishes its PeerRecord (claimInfo + publishPeerRecord)

participant "DB Peer A (oc-lib)" as DBA
participant "Node A" as NodeA
participant "Indexer (shared)" as IndexerA
participant "DHT Kademlia" as DHT
participant "NATS A" as NATSA

NodeA -> DBA: DB(PEER).Search(SELF)
DBA --> NodeA: existing peer (DID_A) or new UUID

NodeA -> NodeA: LoadKeyFromFilePrivate() → priv A
NodeA -> NodeA: LoadKeyFromFilePublic() → pub A

NodeA -> NodeA: Build PeerRecord A {\n Name, DID, PubKey,\n PeerID: PeerID_A,\n APIUrl: hostname,\n StreamAddress: /ip4/.../tcp/4001/p2p/PeerID_A,\n NATSAddress, WalletAddress\n}

NodeA -> NodeA: priv.Sign(rec) → signature
NodeA -> NodeA: rec.ExpiryDate = now + 150s

loop For every indexer bound to Node A (Indexer A, B, ...)
NodeA -> IndexerA: TempStream /opencloud/record/publish/1.0
NodeA -> IndexerA: stream.Encode(Signed PeerRecord A)

IndexerA -> IndexerA: Verify signature
IndexerA -> IndexerA: Check PeerID_A heartbeat stream
IndexerA -> DHT: PutValue("/node/"+DID_A, PeerRecord A)
DHT --> IndexerA: ok
end

NodeA -> NodeA: rec.ExtractPeer(DID_A, DID_A, pub A)
NodeA -> NATSA: SetNATSPub(CREATE_RESOURCE, {PEER, Peer A JSON})
NATSA -> DBA: Upsert Peer A (SearchAttr: peer_id)
DBA --> NATSA: ok

NodeA --> NodeA: *peer.Peer A (SELF)

@enduml
59
docs/diagrams/03_indexer_heartbeat.puml
Normal file
@@ -0,0 +1,59 @@
@startuml indexer_heartbeat
title Bidirectional heartbeat node → indexer (7-dimension scoring + challenges)

participant "Node A" as NodeA
participant "Node B" as NodeB
participant "IndexerService" as Indexer

note over NodeA,NodeB: SendHeartbeat goroutine — tick every 20s

== Tick Node A ==

NodeA -> Indexer: NewStream /opencloud/heartbeat/1.0\n(long-lived, reused on subsequent ticks)
NodeA -> Indexer: stream.Encode(Heartbeat{\n name, PeerID_A, timestamp,\n indexersBinded: [addr1, addr2],\n need: maxPool - len(pool),\n challenges: [PeerID_A, PeerID_B], ← batch (every 1-10 HBs)\n challengeDID: "uuid-did-A", ← DHT challenge (every 5 batches)\n record: SignedPeerRecord_A ← expiry=now+2min\n})

Indexer -> Indexer: CheckHeartbeat(stream, maxNodes)\n→ len(Peers()) >= maxNodes → reject

Indexer -> Indexer: HandleHeartbeat → UptimeTracker.RecordHeartbeat()\n→ gap ≤ 2×interval : TotalOnline += gap

Indexer -> Indexer: Republish PeerRecord A to DHT\nDHT.PutValue("/node/"+DID_A, record_A)

== Indexer response → Node A ==

Indexer -> Indexer: BuildHeartbeatResponse(remotePeer=A, need, challenges, challengeDID)\n\nfillRate = connected_nodes / MaxNodesConn()\npeerCount = connected_nodes\nmaxNodes = MaxNodesConn()\nbornAt = time of indexer startup\n\nChallenges: for each challenged PeerID\n found = PeerID in StreamRecords[ProtocolHeartbeat]?\n lastSeen = HeartbeatStream.UptimeTracker.LastSeen\n\nDHT challenge:\n DHT.GetValue("/node/"+challengeDID, timeout=3s)\n → dhtFound + dhtPayload\n\nWitnesses: up to 3 AddrInfos of connected nodes\n (addresses known in the Peerstore)\n\nSuggestions: up to `need` indexers from dhtCache\n (async refresh 2min, SelectByFillRate)\n\nSuggestMigrate: fillRate > 80%\n AND node in offload.inBatch (batch ≤ 5, grace 3×HB)

Indexer --> NodeA: stream.Encode(HeartbeatResponse{\n fillRate, peerCount, maxNodes, bornAt,\n challenges, dhtFound, dhtPayload,\n witnesses, suggestions, suggestMigrate\n})

== Score processing on Node A ==

NodeA -> NodeA: score = ensureScore(Indexers, addr_indexer)\nscore.UptimeTracker.RecordHeartbeat()\n\nlatencyScore = max(0, 1 - RTT / (BaseRoundTrip × 10))\n\nBornAt stability:\n bornAt changed? → score.bornAtChanges++\n\nfillConsistency:\n expected = peerCount / maxNodes\n |expected - fillRate| < 10% → fillConsistent++\n\nChallenge PeerID (ground truth: own PeerID):\n found=true AND lastSeen < 2×interval → challengeCorrect++\n\nDHT challenge:\n dhtFound=true → dhtSuccess++\n\nWitness query (async):\n go queryWitnesses(h, indexerID, bornAt, fillRate, witnesses, score)

NodeA -> NodeA: score.Score = ComputeNodeSideScore(latencyScore)\n\nScore = (\n 0.20 × uptimeRatio\n+ 0.20 × challengeAccuracy\n+ 0.15 × latencyScore\n+ 0.10 × fillScore ← 1 - fillRate\n+ 0.10 × fillConsistency\n+ 0.15 × witnessConsistency\n+ 0.10 × dhtSuccessRate\n) × 100 × bornAtPenalty\n\nbornAtPenalty = max(0, 1 - 0.30 × bornAtChanges)\nminScore = clamp(20 + 60 × (age.Hours/24), 20, 80)

alt score < minScore\n AND TotalOnline ≥ 2×interval\n AND !IsSeed\n AND len(pool) > 1
NodeA -> NodeA: evictPeer(dir, addr, id, proto)\n→ delete Addr + Score + Stream\ngo TriggerConsensus(h, voters, need)\n or replenishIndexersFromDHT(h, need)
end

alt resp.SuggestMigrate == true AND nonSeedCount >= MinIndexer
alt IsSeed
NodeA -> NodeA: score.IsSeed = false\n(de-stickied — eviction by score now possible)
else !IsSeed
NodeA -> NodeA: evictPeer → migration accepted
end
end

alt len(resp.Suggestions) > 0
NodeA -> NodeA: handleSuggestions(dir, indexerID, suggestions)\n→ unknown ones added to the Indexers Directory\n→ NudgeIt() on effective addition
end

== Tick Node B (concurrent) ==

NodeB -> Indexer: stream.Encode(Heartbeat{PeerID_B, ...})
Indexer -> Indexer: CheckHeartbeat → UptimeTracker → BuildHeartbeatResponse
Indexer --> NodeB: HeartbeatResponse{...}

== Indexer-side GC ==

note over Indexer: GC ticker 30s — gc()\nnow.After(Expiry) where Expiry = lastHBTime + 2min\n→ AfterDelete(pid, name, did) outside the lock\n→ publishNameEvent(NameIndexDelete, ...)\nFillRate recomputed automatically

@enduml
47
docs/diagrams/04_indexer_publish.puml
Normal file
@@ -0,0 +1,47 @@
@startuml
title Indexer — Peer A publishing, Peer B publishing (handleNodePublish → DHT)

participant "Node A" as NodeA
participant "Node B" as NodeB
participant "IndexerService (shared)" as Indexer
participant "DHT Kademlia" as DHT

note over NodeA: Runs after claimInfo or on TTL refresh

par Peer A publishes its PeerRecord
NodeA -> Indexer: TempStream /opencloud/record/publish/1.0
NodeA -> Indexer: stream.Encode(PeerRecord A {DID_A, PeerID_A, PubKey_A, Expiry, Sig_A})

Indexer -> Indexer: Verify sig_A (rebuilds the minimal rec, pubKey_A.Verify)
Indexer -> Indexer: Check StreamRecords[Heartbeat][PeerID_A] exists

alt A has an active heartbeat
Indexer -> Indexer: StreamRecord A → DID_A, Record=PeerRecord A, LastSeen=now
Indexer -> DHT: PutValue("/node/"+DID_A, PeerRecord A JSON)
Indexer -> DHT: PutValue("/name/"+name_A, DID_A)
Indexer -> DHT: PutValue("/peer/"+peer_id_A, DID_A)
DHT --> Indexer: ok
else No heartbeat
Indexer -> NodeA: ("no heartbeat" error, stream closed)
end
else Peer B publishes its PeerRecord
NodeB -> Indexer: TempStream /opencloud/record/publish/1.0
NodeB -> Indexer: stream.Encode(PeerRecord B {DID_B, PeerID_B, PubKey_B, Expiry, Sig_B})

Indexer -> Indexer: Verify sig_B
Indexer -> Indexer: Check StreamRecords[Heartbeat][PeerID_B] exists

alt B has an active heartbeat
Indexer -> Indexer: StreamRecord B → DID_B, Record=PeerRecord B, LastSeen=now
Indexer -> DHT: PutValue("/node/"+DID_B, PeerRecord B JSON)
Indexer -> DHT: PutValue("/name/"+name_B, DID_B)
Indexer -> DHT: PutValue("/peer/"+peer_id_B, DID_B)
DHT --> Indexer: ok
else No heartbeat
Indexer -> NodeB: ("no heartbeat" error, stream closed)
end
end par

note over DHT: DHT now holds\n"/node/DID_A" and "/node/DID_B"

@enduml
51
docs/diagrams/05_indexer_get.puml
Normal file
@@ -0,0 +1,51 @@
@startuml
title Indexer — Peer A discovers Peer B (GetPeerRecord + handleNodeGet)

participant "NATS A" as NATSA
participant "DB Peer A (oc-lib)" as DBA
participant "Node A" as NodeA
participant "IndexerService (shared)" as Indexer
participant "DHT Kademlia" as DHT
participant "NATS A (return)" as NATSA2

note over NodeA: Trigger: NATS PB_SEARCH PEER\nor SubscribeToSearch callback

NodeA -> DBA: (PEER).Search(DID_B or PeerID_B)
DBA --> NodeA: local Peer B (if known) → resolve DID_B + PeerID_B\nor use the search value

loop For every indexer bound to Peer A
NodeA -> Indexer: TempStream /opencloud/record/get/1.0 -> streamAI
NodeA -> Indexer: streamAI.Encode(GetValue{Key: DID_B, PeerID: PeerID_B})

Indexer -> Indexer: key = "/node/" + DID_B
Indexer -> DHT: SearchValue(ctx 10s, "/node/"+DID_B)
DHT --> Indexer: channel of bytes (PeerRecord B)

loop For every result from the DHT
Indexer -> Indexer: read → PeerRecord B
alt PeerRecord.PeerID == PeerID_B
Indexer -> Indexer: resp.Found=true, resp.Records[PeerID_B]=PeerRecord B
Indexer -> Indexer: StreamRecord B.LastSeen = now (if active heartbeat)
end
end

Indexer -> NodeA: streamAI.Encode(GetResponse{Found:true, Records:{PeerID_B: PeerRecord B}})
end

loop For every PeerRecord found
NodeA -> NodeA: rec.Verify() → valid signature from B
NodeA -> NodeA: rec.ExtractPeer(ourDID_A, DID_B, pubKey_B)

alt ourDID_A == DID_B (it is our own entry)
note over NodeA: Republish to refresh the TTL
NodeA -> Indexer: publishPeerRecord(rec) [refresh 2 min]
end

NodeA -> NATSA2: SetNATSPub(CREATE_RESOURCE, {PEER, Peer B JSON,\nSearchAttr:"peer_id"})
NATSA2 -> DBA: Upsert Peer B in DB A
DBA --> NATSA2: ok
end

NodeA --> NodeA: []*peer.Peer → [Peer B]

@enduml
49
docs/diagrams/06_native_registration.puml
Normal file
@@ -0,0 +1,49 @@
@startuml native_registration
title Native Indexer — Indexer Subscription (StartNativeRegistration)

participant "Indexer A" as IndexerA
participant "Indexer B" as IndexerB
participant "Native Indexer" as Native
participant "DHT Kademlia" as DHT
participant "GossipSub (oc-indexer-registry)" as PubSub

note over IndexerA,IndexerB: At start + every 60s (RecommendedHeartbeatInterval)\nStartNativeRegistration → RegisterWithNative

par Indexer A subscribes
IndexerA -> IndexerA: fillRateFn()\n= len(StreamRecords[HB]) / maxNodes

IndexerA -> IndexerA: Build IndexerRegistration{\n PeerID_A, Addr_A,\n Timestamp=now.UnixNano(),\n FillRate=fillRateFn(),\n PubKey, Signature\n}\nreg.Sign(h)

IndexerA -> Native: NewStream /opencloud/native/subscribe/1.0
IndexerA -> Native: stream.Encode(IndexerRegistration A)

Native -> Native: reg.Verify() — verify signature
Native -> Native: liveIndexerEntry{\n PeerID_A, Addr_A,\n ExpiresAt = now + IndexerTTL (90s),\n FillRate = reg.FillRate,\n PubKey, Signature\n}
Native -> Native: liveIndexers[PeerID_A] = entry A
Native -> Native: knownPeerIDs[PeerID_A] = Addr_A

Native -> DHT: PutValue("/indexer/"+PeerID_A, entry A)
DHT --> Native: ok

Native -> PubSub: topic.Publish([]byte(PeerID_A))
note over PubSub: Gossip to the other Natives\n→ each adds PeerID_A to knownPeerIDs\n→ DHT refreshed on the next tick (30s)

IndexerA -> Native: stream.Close()

else Indexer B subscribes
IndexerB -> IndexerB: fillRateFn() + reg.Sign(h)
IndexerB -> Native: NewStream /opencloud/native/subscribe/1.0
IndexerB -> Native: stream.Encode(IndexerRegistration B)

Native -> Native: reg.Verify() + liveIndexerEntry{FillRate=reg.FillRate, ExpiresAt=now+90s}
Native -> Native: liveIndexers[PeerID_B] = entry B
Native -> DHT: PutValue("/indexer/"+PeerID_B, entry B)
Native -> PubSub: topic.Publish([]byte(PeerID_B))
IndexerB -> Native: stream.Close()
end par

note over Native: liveIndexers = {PeerID_A: {FillRate:0.3}, PeerID_B: {FillRate:0.6}}\nTTL 90s — IndexerTTL

note over Native: Explicit unsubscribe on stop:\nUnregisterFromNative → /opencloud/native/unsubscribe/1.0\nthe Native removes the entry immediately.

@enduml
70
docs/diagrams/07_native_get_consensus.puml
Normal file
@@ -0,0 +1,70 @@
@startuml native_get_consensus
|
||||
title Native — ConnectToNatives : fetch pool + Phase 1 + Phase 2
|
||||
|
||||
participant "Node / Indexer\\n(appelant)" as Caller
|
||||
participant "Native A" as NA
|
||||
participant "Native B" as NB
|
||||
participant "Indexer A\\n(stable voter)" as IA
|
||||
|
||||
note over Caller: NativeIndexerAddresses configured\\nConnectToNatives() called from ConnectToIndexers
|
||||
|
||||
== Step 1 : heartbeat to the native mesh (nativeHeartbeatOnce) ==
|
||||
Caller -> NA: SendHeartbeat /opencloud/heartbeat/1.0
|
||||
Caller -> NB: SendHeartbeat /opencloud/heartbeat/1.0
|
||||
|
||||
== Step 2 : parrallel fetch pool (timeout 6s) ==
|
||||
par fetchIndexersFromNative — parallel
|
||||
Caller -> NA: NewStream /opencloud/native/indexers/1.0\\nGetIndexersRequest{Count: maxIndexer, From: PeerID}
|
||||
NA -> NA: reachableLiveIndexers()\\ntri par w(F) = fillRate×(1−fillRate) desc
|
||||
NA --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3,IB:0.6}}
|
||||
else
|
||||
Caller -> NB: NewStream /opencloud/native/indexers/1.0
|
||||
NB -> NB: reachableLiveIndexers()
|
||||
NB --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3,IB:0.6}}
|
||||
end par
|
||||
|
||||
note over Caller: Fusion → candidates=[IA,IB]\\nisFallback=false
|
||||
|
||||
alt isFallback=true (native give themself as Fallback indexer)
|
||||
note over Caller: resolvePool : avoid consensus\\nadmittedAt = Now (zero)\\nStaticIndexers = {native_addr}
|
||||
else isFallback=false → Phase 1 + Phase 2
|
||||
== Phase 1 — clientSideConsensus (timeout 3s/natif, 4s total) ==
|
||||
par Parralel Consensus
|
||||
Caller -> NA: NewStream /opencloud/native/consensus/1.0\\nConsensusRequest{Candidates:[IA,IB]}
|
||||
NA -> NA: compare with clean liveIndexers
|
||||
NA --> Caller: ConsensusResponse{Trusted:[IA,IB], Suggestions:[]}
|
||||
else
|
||||
Caller -> NB: NewStream /opencloud/native/consensus/1.0
|
||||
NB --> Caller: ConsensusResponse{Trusted:[IA], Suggestions:[IC]}
|
||||
end par
|
||||
|
||||
note over Caller: IA → 2/2 votes → confirmed ✓\\nIB → 1/2 vote → refusé ✗\\nIC → suggestion → round 2 if confirmed < maxIndexer
|
||||
|
||||
alt confirmed < maxIndexer && available suggestions
|
||||
note over Caller: Round 2 — rechallenge with confirmed + sample(suggestions)\\nclientSideConsensus([IA, IC])
|
||||
end
|
||||
|
||||
note over Caller: admittedAt = time.Now()
|
||||
|
||||
== Phase 2 — indexerLivenessVote (timeout 3s/votant, 4s total) ==
|
||||
note over Caller: Search for stable voters in Subscribed Indexers\\nAdmittedAt != zero && age >= MinStableAge (2min)
|
||||
|
||||
alt Stable Voters are available
|
||||
par Phase 2 parrallel
|
||||
Caller -> IA: NewStream /opencloud/indexer/consensus/1.0\\nIndexerConsensusRequest{Candidates:[IA]}
|
||||
IA -> IA: StreamRecords[ProtocolHB][candidate]\\ntime.Since(LastSeen) <= 120s && LastScore >= 30.0
|
||||
IA --> Caller: IndexerConsensusResponse{Alive:[IA]}
|
||||
end par
|
||||
note over Caller: alive IA confirmed per quorum > 0.5\\npool = {IA}
|
||||
else No voters are stable (startup)
|
||||
note over Caller: Phase 1 keep directly\\n(no indexer reaches MinStableAge)
|
||||
end
|
||||
|
||||
== Replacement pool ==
|
||||
Caller -> Caller: replaceStaticIndexers(pool, admittedAt)\\nStaticIndexerMeta[IA].AdmittedAt = admittedAt
|
||||
end
|
||||
|
||||
== Étape 3 : heartbeat to indexers pool (ConnectToIndexers) ==
|
||||
Caller -> Caller: SendHeartbeat /opencloud/heartbeat/1.0\\nvers StaticIndexers
|
||||
|
||||
@enduml
|
||||
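The Phase 1 tallying above reduces to a strict-majority rule over the natives that answered: 2/2 confirms a candidate, 1/2 rejects it. A minimal sketch of that quorum count, assuming the > 0.5 threshold shown in the diagram (the function shape and names are illustrative):

```go
package main

import "fmt"

// confirmByQuorum keeps a candidate only if a strict majority of the
// natives that responded voted for it (2/2 confirms, 1/2 rejects).
func confirmByQuorum(votes map[string]int, responders int) []string {
	var confirmed []string
	for candidate, n := range votes {
		if responders > 0 && float64(n)/float64(responders) > 0.5 {
			confirmed = append(confirmed, candidate)
		}
	}
	return confirmed
}

func main() {
	// Phase 1 tallies from the example: IA trusted by both natives, IB by one.
	votes := map[string]int{"IA": 2, "IB": 1}
	fmt.Println(confirmByQuorum(votes, 2)) // [IA]
}
```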
38
docs/diagrams/08_nats_create_update_peer.puml
Normal file
@@ -0,0 +1,38 @@
@startuml
title NATS — CREATE_RESOURCE: Peer A creates/updates Peer B (on-demand connection)

participant "App Peer A (oc-api)" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "Node B" as NodeB
participant "DB Peer A (oc-lib)" as DBA

note over AppA: Peer B is discovered\n(via an indexer or manually)

AppA -> NATSA: Publish(CREATE_RESOURCE, {\n FromApp:"oc-api",\n Datatype:PEER,\n Payload: Peer B {StreamAddress_B, Relation:PARTNER}\n})

NATSA -> NodeA: ListenNATS callback → CREATE_RESOURCE

NodeA -> NodeA: json.Unmarshal(payload) → peer.Peer B
NodeA -> NodeA: if peer == self → skip

alt peer B.Relation == PARTNER
NodeA -> StreamA: ToPartnerPublishEvent(ctx, PB_CREATE, PEER, payload)
note over StreamA: No permanent heartbeat.\nOn-demand connection: open a stream,\nsend the event, close it or let it expire.
StreamA -> StreamA: PublishCommon(PEER, user, B.PeerID,\n ProtocolUpdateResource, selfPeerJSON)
StreamA -> NodeB: TempStream /opencloud/resource/update/1.0\n(short TTL, closed after send)
StreamA -> NodeB: stream.Encode(Event{from, datatype, payload})
NodeB --> StreamA: (application-side handling)

else peer B.Relation != PARTNER (revocation / blacklist)
note over NodeA: Close every existing stream to Peer B
loop For each active stream to PeerID_B
NodeA -> StreamA: streams[proto][PeerID_B].Stream.Close()
NodeA -> StreamA: delete(streams[proto], PeerID_B)
end
end

NodeA -> DBA: (no direct write — only the source app manages the DB)

@enduml
73
docs/diagrams/09_nats_propagation.puml
Normal file
@@ -0,0 +1,73 @@
@startuml
title NATS — PROPALGATION_EVENT: Peer A propagates to partner Peer B

participant "App Peer A" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "Node Partner B" as NodeB
participant "Node C" as PeerC

participant "NATS B" as NATSB
participant "DB Peer B (oc-lib)" as DBB

note over AppA: Only our own resources (DB data) can be propagated: creator_id == self

AppA -> NATSA: Publish(PROPALGATION_EVENT, {Action, DataType, Payload})
NATSA -> NodeA: ListenNATS callback → PROPALGATION_EVENT
NodeA -> NodeA: propagated by ourselves? → no, continue
NodeA -> NodeA: json.Unmarshal → PropalgationMessage{Action, DataType, Payload}

alt Action == PB_DELETE
NodeA -> StreamA: ToPartnerPublishEvent(PB_DELETE, dt, user, payload)
StreamA -> StreamA: searchPeer(PARTNER) → [Peer Partner B, ...]
StreamA -> NodeB: write(PeerID_B, addr_B, dt, user, payload, ProtocolDeleteResource)
note over NodeB: /opencloud/resource/delete/1.0

NodeB -> NodeB: handleEventFromPartner(evt, ProtocolDeleteResource)
NodeB -> NATSB: SetNATSPub(REMOVE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Delete the resource from DB B

else Action == PB_UPDATE (via ProtocolUpdateResource)
NodeA -> StreamA: ToPartnerPublishEvent(PB_UPDATE, dt, user, payload)
StreamA -> StreamA: searchPeer(PARTNER) → [Peer Partner B, ...]
StreamA -> NodeB: write → /opencloud/resource/update/1.0
NodeB -> NATSB: SetNATSPub(CREATE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Upsert the resource in DB B

else Action == PB_CREATE (via ProtocolCreateResource)
NodeA -> StreamA: ToPartnerPublishEvent(PB_CREATE, dt, user, payload)
StreamA -> StreamA: searchPeer(PARTNER) → [Peer Partner B, ...]
StreamA -> NodeB: write → /opencloud/resource/create/1.0
NodeB -> NATSB: SetNATSPub(CREATE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Create the resource in DB B

else Action == PB_CONSIDERS (a "considers" on a previous action, such as planning or creating a resource)
NodeA -> NodeA: Unmarshal → executionConsidersPayload{PeerIDs:[PeerID_B, ...]}
loop For each targeted peer_id
NodeA -> StreamA: PublishCommon(dt, user, PeerID_B, ProtocolConsidersResource, payload)
StreamA -> NodeB: write → /opencloud/resource/considers/1.0
NodeB -> NodeB: passConsidering(evt)
NodeB -> NATSB: SetNATSPub(PROPALGATION_EVENT, {PB_CONSIDERS, dt, payload})
NATSB -> DBB: (handled on NATS B by the app that emitted the previous action)
end

else Action == PB_CLOSE_PLANNER
NodeA -> NodeA: Unmarshal → {peer_id: PeerID_B}
NodeA -> StreamA: Streams[ProtocolSendPlanner][PeerID_B].Stream.Close()
NodeA -> StreamA: delete(Streams[ProtocolSendPlanner], PeerID_B)

else Action == PB_SEARCH + DataType == PEER
NodeA -> NodeA: read → {search: "..."}
NodeA -> NodeA: GetPeerRecord(ctx, search)
note over NodeA: Resolved via DB A or Indexer + DHT
NodeA -> NATSA: SetNATSPub(SEARCH_EVENT, {PEER, PeerRecord JSON})
NATSA -> NATSA: (AppA retrieves the results)

else Action == PB_SEARCH + other DataType
NodeA -> NodeA: read → {type:"all"|"known"|"partner", search:"..."}
NodeA -> NodeA: PubSubService.SearchPublishEvent(ctx, dt, type, user, search)
note over NodeA: See the pubsub_search & stream_search diagrams
end

@enduml
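The handler above is essentially an action-to-protocol dispatch. A compact sketch of that mapping, assuming PB_* action constants like those in the diagram (the constants, values, and function are illustrative stand-ins, not the real oc-discovery API):

```go
package main

import "fmt"

// Action mirrors the PB_* values dispatched by the PROPALGATION_EVENT
// handler above; the constants and their values are illustrative.
type Action int

const (
	PB_CREATE Action = iota
	PB_UPDATE
	PB_DELETE
	PB_CONSIDERS
	PB_CLOSE_PLANNER
	PB_SEARCH
)

// protocolFor maps an action to the libp2p protocol the diagram routes it to.
func protocolFor(a Action) (string, error) {
	switch a {
	case PB_CREATE:
		return "/opencloud/resource/create/1.0", nil
	case PB_UPDATE:
		return "/opencloud/resource/update/1.0", nil
	case PB_DELETE:
		return "/opencloud/resource/delete/1.0", nil
	case PB_CONSIDERS:
		return "/opencloud/resource/considers/1.0", nil
	default:
		return "", fmt.Errorf("action %d is not stream-routed", a)
	}
}

func main() {
	for _, a := range []Action{PB_CREATE, PB_DELETE, PB_SEARCH} {
		p, err := protocolFor(a)
		fmt.Println(a, p, err)
	}
}
```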
58
docs/diagrams/10_pubsub_search.puml
Normal file
@@ -0,0 +1,58 @@
@startuml
title PubSub — Global gossip search (type "all"): Peer A searches, Peer B answers

participant "App UI A" as UIA
participant "App Peer A" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "PubSubService A" as PubSubA
participant "GossipSub libp2p (mesh)" as GossipSub
participant "Node B" as NodeB
participant "PubSubService B" as PubSubB
participant "DB Peer B (oc-lib)" as DBB
participant "StreamService B" as StreamB

UIA -> AppA: websocket subscription, sending {type:"all", search:"search"} in the query

AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_SEARCH, type:"all", search:"search"})
NATSA -> NodeA: ListenNATS → PB_SEARCH (type "all")

NodeA -> PubSubA: SearchPublishEvent(ctx, dt, "all", user, "search")
PubSubA -> PubSubA: publishEvent(PB_SEARCH, user, {search:"search"})
PubSubA -> PubSubA: priv_A.Sign(event body) → sig
PubSubA -> PubSubA: Build Event{Type:"search", From:DID_A, Payload:{search:"search"}, Sig}

PubSubA -> GossipSub: topic.Join("search")
PubSubA -> GossipSub: topic.Publish(ctx, json(Event))

GossipSub --> NodeB: Propagate message (gossip mesh)

NodeB -> PubSubB: subscribeEvents listens to topic "search#"
PubSubB -> PubSubB: read → Event{From: DID_A}

PubSubB -> NodeB: GetPeerRecord(ctx, DID_A)
note over NodeB: Resolve Peer A via DB B or by asking an indexer
NodeB --> PubSubB: Peer A {PublicKey_A, Relation, ...}

PubSubB -> PubSubB: event.Verify(Peer A) → valid sig_A
PubSubB -> PubSubB: handleEventSearch(ctx, evt, PB_SEARCH)

PubSubB -> StreamB: SendResponse(Peer A, evt)
StreamB -> DBB: Search(COMPUTE + STORAGE + ..., filters{creator=self, access=PUBLIC OR partnerships[PeerID_A]}, search="search")
DBB --> StreamB: [Resource1, Resource2, ...]

loop For each matching resource (only our own resources: creator_id == self_did)
StreamB -> StreamB: write(PeerID_A, addr_A, dt, resource JSON, ProtocolSearchResource)
StreamB -> StreamA: NewStream /opencloud/resource/search/1.0
StreamB -> StreamA: stream.Encode(Event{Type:search, From:DID_B, DataType, Payload:resource})
end

StreamA -> StreamA: readLoop → handleEvent(ProtocolSearchResource, evt)
StreamA -> StreamA: retrieveResponse(evt)
StreamA -> NATSA: SetNATSPub(SEARCH_EVENT, {DataType, resource JSON})
NATSA -> AppA: Search results from Peer B
AppA -> UIA: emit on websocket

@enduml
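The search event above is signed by the sender and verified by the receiver against the public key found in the peer record. A minimal sketch of that sign/verify round-trip using Go's standard ed25519 (the actual code signs with the libp2p private key; the `Event` shape here is a simplified stand-in):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/json"
	"fmt"
)

// Event is a simplified stand-in for the gossip search event.
type Event struct {
	Type    string `json:"type"`
	From    string `json:"from"`
	Payload string `json:"payload"`
}

func main() {
	pub, priv, _ := ed25519.GenerateKey(rand.Reader)

	evt := Event{Type: "search", From: "DID_A", Payload: `{"search":"gpu"}`}
	body, _ := json.Marshal(evt)

	// Sender side: sign the serialized event body.
	sig := ed25519.Sign(priv, body)

	// Receiver side: resolve the sender's public key (via the peer
	// record, as in the diagram) and verify before handling the event.
	fmt.Println("signature valid:", ed25519.Verify(pub, body, sig))
}
```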
54
docs/diagrams/11_stream_search.puml
Normal file
@@ -0,0 +1,54 @@
@startuml
title Stream — Direct search (type "known"/"partner"): Peer A → Peer B

participant "App UI A" as UIA
participant "App Peer A" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "PubSubService A" as PubSubA
participant "StreamService A" as StreamA
participant "DB Peer A (oc-lib)" as DBA
participant "Node B" as NodeB
participant "StreamService B" as StreamB
participant "DB Peer B (oc-lib)" as DBB

UIA -> AppA: websocket subscription, sending {type:"partner", search:"gpu"} in the query

AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_SEARCH, type:"partner", search:"gpu"})
NATSA -> NodeA: ListenNATS → PB_SEARCH (type "partner")
NodeA -> PubSubA: SearchPublishEvent(ctx, dt, "partner", user, "gpu")

PubSubA -> StreamA: SearchPartnersPublishEvent(dt, user, "gpu")
StreamA -> DBA: Search(PEER, PARTNER) + PeerIDS config
DBA --> StreamA: [Peer B, ...]

loop For each partner peer (Peer B)
StreamA -> StreamA: write(PeerID_B, addr_B, dt, user, payload, ProtocolSearchResource)
StreamA -> NodeB: TempStream /opencloud/resource/search/1.0
StreamA -> NodeB: stream.Encode(Event{Type:search, From:DID_A, DataType, Payload:{search:"gpu"}})

NodeB -> StreamB: HandleResponse(stream) → readLoop
StreamB -> StreamB: handleEvent(ProtocolSearchResource, evt)
StreamB -> StreamB: handleEventFromPartner(evt, ProtocolSearchResource)

alt evt.DataType == -1 (all resources)
StreamB -> DBB: Search(PEER, evt.From=DID_A)
note over StreamB: Local resolution (DB) or GetPeerRecord (indexer path)
StreamB -> StreamB: SendResponse(Peer A, evt)
StreamB -> DBB: Search(ALL_RESOURCES, filter{creator=B + public OR partner A + search:"gpu"})
DBB --> StreamB: [Resource1, Resource2, ...]
else evt.DataType specified
StreamB -> DBB: Search(DataType, filter{creator=B + access + search:"gpu"})
DBB --> StreamB: [Resource1, ...]
end

loop For each resource
StreamB -> StreamA: write(PeerID_A, addr_A, dt, resource JSON, ProtocolSearchResource)
StreamA -> StreamA: readLoop → handleEvent(ProtocolSearchResource, evt)
StreamA -> StreamA: retrieveResponse(evt)
StreamA -> NATSA: SetNATSPub(SEARCH_EVENT, {DataType, resource JSON})
NATSA -> AppA: Peer B results
AppA -> UIA: emit on websocket
end
end
@enduml
60
docs/diagrams/12_partner_heartbeat.puml
Normal file
@@ -0,0 +1,60 @@
@startuml
title Stream — Partner heartbeat and CRUD propagation, Peer A ↔ Peer B

participant "DB Peer A (oc-lib)" as DBA
participant "StreamService A" as StreamA
participant "Node A" as NodeA
participant "Node B" as NodeB
participant "StreamService B" as StreamB
participant "NATS B" as NATSB
participant "DB Peer B (oc-lib)" as DBB
participant "NATS A" as NATSA

note over StreamA: Startup → connectToPartners()

StreamA -> DBA: Search(PEER, PARTNER) + PeerIDS config
DBA --> StreamA: [Peer B, ...]

StreamA -> NodeB: Connect (libp2p)
StreamA -> NodeB: NewStream /opencloud/resource/heartbeat/partner/1.0
StreamA -> NodeB: json.Encode(Heartbeat{Name_A, DID_A, PeerID_A, IndexersBinded_A})

NodeB -> StreamB: HandlePartnerHeartbeat(stream)
StreamB -> StreamB: CheckHeartbeat → bandwidth challenge
StreamB -> StreamA: Echo(payload)
StreamB -> StreamB: streams[ProtocolHeartbeatPartner][PeerID_A] = {DID_A, Expiry=now+10s}

StreamA -> StreamA: streams[ProtocolHeartbeatPartner][PeerID_B] = {DID_B, Expiry=now+10s}

note over StreamA,StreamB: Long-lived partner stream established\nGC every 8s (StreamService A)\nGC every 30s (StreamService B)

note over NATSA: Peer A receives PROPALGATION_EVENT{PB_DELETE, dt:"storage", payload:res}

NATSA -> NodeA: ListenNATS → ToPartnerPublishEvent(PB_DELETE, dt, user, payload)
NodeA -> StreamA: ToPartnerPublishEvent(ctx, PB_DELETE, dt_storage, user, payload)

alt dt == PEER (partner relation update)
StreamA -> StreamA: json.Unmarshal → updated peer.Peer B
alt B.Relation == PARTNER
StreamA -> NodeB: ConnectToPartner(B.StreamAddress)
note over StreamA,NodeB: Heartbeat reconnection on relation upgrade
else B.Relation != PARTNER
loop All protocols
StreamA -> StreamA: delete(streams[proto][PeerID_B])
StreamA -> NodeB: (streams closed)
end
end
else dt != PEER (ordinary resource)
StreamA -> DBA: Search(PEER, PARTNER) → [Peer B, ...]
loop For each partner protocol (Create/Update/Delete)
StreamA -> NodeB: write(PeerID_B, addr_B, dt, user, payload, ProtocolDeleteResource)
note over NodeB: /opencloud/resource/delete/1.0

NodeB -> StreamB: HandleResponse → readLoop
StreamB -> StreamB: handleEventFromPartner(evt, ProtocolDeleteResource)
StreamB -> NATSB: SetNATSPub(REMOVE_RESOURCE, {DataType, resource JSON})
NATSB -> DBB: Delete the resource from DB B
end
end

@enduml
51
docs/diagrams/13_planner_flow.puml
Normal file
@@ -0,0 +1,51 @@
@startuml
title Stream — Planner session: Peer A requests Peer B's plan

participant "App Peer A (oc-booking)" as AppA
participant "NATS A" as NATSA
participant "Node A" as NodeA
participant "StreamService A" as StreamA
participant "Node B" as NodeB
participant "StreamService B" as StreamB
participant "DB Peer B (oc-lib)" as DBB
participant "NATS B" as NATSB

' Planner session opening
AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_PLANNER, peer_id:PeerID_B, payload:{}})
NATSA -> NodeA: ListenNATS → PB_PLANNER

NodeA -> NodeA: Unmarshal → {peer_id: PeerID_B, payload: {}}
NodeA -> StreamA: PublishCommon(nil, user, PeerID_B, ProtocolSendPlanner, {})
note over StreamA: WaitResponse=true, TTL=24h\nLong-lived stream to Peer B
StreamA -> NodeB: TempStream /opencloud/resource/planner/1.0
StreamA -> NodeB: json.Encode(Event{Type:planner, From:DID_A, Payload:{}})

NodeB -> StreamB: HandleResponse → readLoop(ProtocolSendPlanner)
StreamB -> StreamB: handleEvent(ProtocolSendPlanner, evt)
StreamB -> StreamB: sendPlanner(evt)

alt evt.Payload empty (initial request)
StreamB -> DBB: planner.GenerateShallow(AdminRequest)
DBB --> StreamB: plan (Peer B's shallow booking plan)
StreamB -> StreamA: PublishCommon(nil, user, DID_A, ProtocolSendPlanner, planJSON)
StreamA -> NodeA: json.Encode(Event{plan from B})
NodeA -> NATSA: (forwarded to AppA via SEARCH_EVENT or a PLANNER event)
NATSA -> AppA: Peer B's plan
else evt.Payload non-empty (planner update)
StreamB -> StreamB: m["peer_id"] = evt.From (DID_A)
StreamB -> NATSB: SetNATSPub(PROPALGATION_EVENT, {PB_PLANNER, peer_id:DID_A, payload:plan})
NATSB -> DBB: (oc-booking handles the plan on NATS B)
end

' Planner session closing
AppA -> NATSA: Publish(PROPALGATION_EVENT, {PB_CLOSE_PLANNER, peer_id:PeerID_B})
NATSA -> NodeA: ListenNATS → PB_CLOSE_PLANNER

NodeA -> NodeA: Unmarshal → {peer_id: PeerID_B}
NodeA -> StreamA: Mu.Lock()
NodeA -> StreamA: Streams[ProtocolSendPlanner][PeerID_B].Stream.Close()
NodeA -> StreamA: delete(Streams[ProtocolSendPlanner], PeerID_B)
NodeA -> StreamA: Mu.Unlock()
note over StreamA,NodeB: Planner stream closed — session over

@enduml
61
docs/diagrams/14_native_offload_gc.puml
Normal file
@@ -0,0 +1,61 @@
@startuml
title Native Indexer — Background loops (offload, DHT refresh, stream GC)

participant "Indexer A (registered)" as IndexerA
participant "Indexer B (registered)" as IndexerB
participant "Native Indexer" as Native
participant "DHT Kademlia" as DHT
participant "Node A (responsible peer)" as NodeA

note over Native: runOffloadLoop — every 30s

loop Every 30s
Native -> Native: len(responsiblePeers) > 0 ?
note over Native: responsiblePeers = peers the native\nself-delegated for (no indexer was available)
alt Responsible peers exist (e.g. Node A)
Native -> Native: reachableLiveIndexers()
note over Native: Filters liveIndexers by TTL\npings PeerIsAlive for each candidate
alt Indexers A and B now reachable
Native -> Native: responsiblePeers = {} (releases Node A and the others)
note over Native: Node A will reconnect\non its next ConnectToNatives
else Still no indexer
note over Native: Node A stays under the native's responsibility
end
end
end

note over Native: refreshIndexersFromDHT — every 30s

loop Every 30s
Native -> Native: Collect all knownPeerIDs\n= {PeerID_A, PeerID_B, ...}
loop For each known PeerID
Native -> Native: liveIndexers[PeerID] still fresh?
alt Entry missing or expired
Native -> DHT: SearchValue(ctx 5s, "/indexer/"+PeerID)
DHT --> Native: channel of bytes
loop For each DHT result
Native -> Native: Unmarshal → liveIndexerEntry
Native -> Native: Keep the best (most recent valid ExpiresAt)
end
Native -> Native: liveIndexers[PeerID] = best entry
note over Native: "native: refreshed indexer from DHT"
end
end
end

note over Native: LongLivedStreamRecordedService GC — every 30s

loop Every 30s
Native -> Native: gc() — lock StreamRecords[Heartbeat]
loop For each StreamRecord (Indexer A, B, ...)
Native -> Native: now > rec.Expiry ?\nOR timeSince(LastSeen) > 2×remaining TTL ?
alt Peer expired (e.g. Indexer B gone)
Native -> Native: Remove Indexer B from ALL protocol maps
note over Native: Heartbeat stream closed\nliveIndexers[PeerID_B] will expire naturally
end
end
end

note over IndexerA: Indexer A keeps heartbeating normally\nand stays in StreamRecords + liveIndexers

@enduml
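A minimal sketch of the 30s GC pattern the last loop describes: lock the record map, evict entries whose expiry has passed. Field names such as `Expiry` and `LastSeen` follow the diagram; everything else (the `recorder` type, the demo data) is illustrative:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type streamRecord struct {
	Expiry   time.Time
	LastSeen time.Time
}

type recorder struct {
	mu      sync.Mutex
	records map[string]*streamRecord
}

// gc evicts records whose expiry has passed, mirroring the loop above;
// the corresponding liveIndexers entry is left to expire on its own.
func (r *recorder) gc() {
	r.mu.Lock()
	defer r.mu.Unlock()
	now := time.Now()
	for peerID, rec := range r.records {
		if now.After(rec.Expiry) {
			delete(r.records, peerID)
			fmt.Println("gc: evicted", peerID)
		}
	}
}

func main() {
	r := &recorder{records: map[string]*streamRecord{
		"PeerID_B": {Expiry: time.Now().Add(-time.Second)}, // already stale
	}}
	r.gc() // the service runs this body on a 30s time.Ticker
}
```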
49
docs/diagrams/15_archi_config_nominale.puml
Normal file
@@ -0,0 +1,49 @@
@startuml 15_archi_config_nominale
skinparam componentStyle rectangle
skinparam backgroundColor white
skinparam defaultTextAlignment center

title C1 — Nominal topology\n2 natives · 2 indexers · 2 nodes

package "Layer 1 — Native mesh" #E8F4FD {
component "Native A\n(authoritative hub)" as NA #AED6F1
component "Native B\n(authoritative hub)" as NB #AED6F1
NA <--> NB : heartbeat /opencloud/heartbeat/1.0 (20s)\n+ gossip PubSub oc-indexer-registry
}

package "Layer 2 — Indexers" #E9F7EF {
component "Indexer A\n(DHT server)" as IA #A9DFBF
component "Indexer B\n(DHT server)" as IB #A9DFBF
}

package "Layer 3 — Nodes" #FEFBD8 {
component "Node 1" as N1 #FAF0BE
component "Node 2" as N2 #FAF0BE
}

' Registrations (one-shot, 60s)
IA -[#117A65]--> NA : signed subscribe (60s)\n/opencloud/native/subscribe/1.0
IA -[#117A65]--> NB : signed subscribe (60s)
IB -[#117A65]--> NA : signed subscribe (60s)
IB -[#117A65]--> NB : signed subscribe (60s)

' Indexer → native heartbeats (long-lived, 20s)
IA -[#27AE60]..> NA : heartbeat (20s)
IA -[#27AE60]..> NB : heartbeat (20s)
IB -[#27AE60]..> NA : heartbeat (20s)
IB -[#27AE60]..> NB : heartbeat (20s)

' Node → indexer heartbeats (long-lived, 20s)
N1 -[#E67E22]--> IA : long-lived heartbeat (20s)\n/opencloud/heartbeat/1.0
N1 -[#E67E22]--> IB : long-lived heartbeat (20s)
N2 -[#E67E22]--> IA : long-lived heartbeat (20s)
N2 -[#E67E22]--> IB : long-lived heartbeat (20s)

note as Legend
Legend:
──► one-shot registration (signed)
···► long-lived heartbeat (20s)
──► node → indexer heartbeat (20s)
end note

@enduml
38
docs/diagrams/16_archi_config_seed.puml
Normal file
@@ -0,0 +1,38 @@
@startuml 16_archi_config_seed
skinparam componentStyle rectangle
skinparam backgroundColor white
skinparam defaultTextAlignment center

title C2 — Seed mode (no native)\nIndexerAddresses only · AdmittedAt = zero

package "Layer 2 — Seed indexers" #E9F7EF {
component "Indexer A\n(seed, AdmittedAt=0)" as IA #A9DFBF
component "Indexer B\n(seed, AdmittedAt=0)" as IB #A9DFBF
}

package "Layer 3 — Nodes" #FEFBD8 {
component "Node 1" as N1 #FAF0BE
component "Node 2" as N2 #FAF0BE
}

note as NNative #FFDDDD
No native configured.
AdmittedAt = zero → IsStableVoter() = false
Phase 2 without voters: Phase 1 kept directly.
Risk D20: trust circularity (seeds validate each other).
end note

' Node → seed indexer heartbeats
N1 -[#E67E22]--> IA : long-lived heartbeat (20s)
N1 -[#E67E22]--> IB : long-lived heartbeat (20s)
N2 -[#E67E22]--> IA : long-lived heartbeat (20s)
N2 -[#E67E22]--> IB : long-lived heartbeat (20s)

note bottom of IA
After 2s: async goroutine
fetchNativeFromIndexers → ?
If a native is found → ConnectToNatives (upgrade to C1)
If not → pure indexer mode (D20 active)
end note

@enduml
63
docs/diagrams/17_startup_consensus_phase1_phase2.puml
Normal file
@@ -0,0 +1,63 @@
@startuml 17_startup_consensus_phase1_phase2
title Startup with natives — Phase 1 (admission) + Phase 2 (liveness)

participant "Node / Indexer\n(caller)" as Caller
participant "Native A" as NA
participant "Native B" as NB
participant "Indexer A" as IA
participant "Indexer B" as IB

note over Caller: ConnectToNatives()\nNativeIndexerAddresses configured

== Step 0: heartbeat to the native mesh ==
Caller -> NA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
Caller -> NB: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)

== Step 1: parallel pool fetch ==
par Parallel fetch (timeout 6s)
Caller -> NA: GET /opencloud/native/indexers/1.0\nGetIndexersRequest{Count: max, FillRates requested}
NA -> NA: reachableLiveIndexers()\nsorted by w(F) = fillRate×(1−fillRate)
NA --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3, IB:0.6}}
else
Caller -> NB: GET /opencloud/native/indexers/1.0
NB -> NB: reachableLiveIndexers()
NB --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.3, IB:0.6}}
end par

note over Caller: Merge + dedup → candidates = [IA, IB]\nisFallback = false

== Step 2a: Phase 1 — native admission (clientSideConsensus) ==
par Parallel consensus (timeout 3s per native, 4s total)
Caller -> NA: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IB]}
NA -> NA: cross-check against liveIndexers
NA --> Caller: ConsensusResponse{Trusted:[IA,IB], Suggestions:[]}
else
Caller -> NB: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IB]}
NB -> NB: cross-check against liveIndexers
NB --> Caller: ConsensusResponse{Trusted:[IA], Suggestions:[IC]}
end par

note over Caller: IA → 2/2 votes → confirmed ✓\nIB → 1/2 votes → rejected ✗\nadmittedAt = time.Now()

== Step 2b: Phase 2 — liveness vote (indexerLivenessVote) ==
note over Caller: Look for stable voters in StaticIndexerMeta\n(AdmittedAt != zero, age >= MinStableAge=2min)

alt Stable voters available
par Phase 2 parallel (timeout 3s)
Caller -> IA: /opencloud/indexer/consensus/1.0\nIndexerConsensusRequest{Candidates:[IA]}
IA -> IA: check StreamRecords[ProtocolHB][candidate]\nLastSeen ≤ 2×60s && LastScore ≥ 30
IA --> Caller: IndexerConsensusResponse{Alive:[IA]}
end par
note over Caller: IA confirmed alive by quorum > 0.5
else No stable voters (first startup)
note over Caller: Phase 1 result kept directly\n(no voter has reached MinStableAge)
end

== Step 3: StaticIndexers replacement ==
Caller -> Caller: replaceStaticIndexers(pool={IA}, admittedAt)\nStaticIndexerMeta[IA].AdmittedAt = time.Now()

== Step 4: long-lived heartbeat to the pool ==
Caller -> IA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
note over Caller: Pool active. NudgeIndexerHeartbeat()

@enduml
51
docs/diagrams/18_startup_seed_discovers_native.puml
Normal file
@@ -0,0 +1,51 @@
@startuml 18_startup_seed_discovers_native
title C2 → C1 — Seed discovers a native (async upgrade)

participant "Node / Indexer\n(seed mode)" as Caller
participant "Indexer A\n(seed)" as IA
participant "Indexer B\n(seed)" as IB
participant "Native A\n(discovered)" as NA

note over Caller: Startup without NativeIndexerAddresses\nStaticIndexers = [IA, IB] (AdmittedAt=0)

== Initial seed phase ==
Caller -> IA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
Caller -> IB: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)

note over Caller: Pool active in seed mode.\nIsStableVoter() = false (AdmittedAt=0)\nPhase 2 without voters → Phase 1 kept.

== Async goroutine after 2s ==
note over Caller: time.Sleep(2s)\nfetchNativeFromIndexers()

Caller -> IA: GET /opencloud/indexer/natives/1.0
IA --> Caller: GetNativesResponse{Natives:[NA]}

note over Caller: Native discovered: NA\nConnectToNatives([NA]) called

== Upgrade to nominal mode (ConnectToNatives) ==
Caller -> NA: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)

par Pool fetch from the native (timeout 6s)
Caller -> NA: GET /opencloud/native/indexers/1.0\nGetIndexersRequest{Count: max}
NA -> NA: reachableLiveIndexers()\nsorted by w(F) = fillRate×(1−fillRate)
NA --> Caller: GetIndexersResponse{Indexers:[IA,IB], FillRates:{IA:0.4, IB:0.6}}
end par

note over Caller: candidates = [IA, IB], isFallback = false

par Phase 1 consensus (timeout 3s)
Caller -> NA: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IB]}
NA -> NA: cross-check against liveIndexers
NA --> Caller: ConsensusResponse{Trusted:[IA,IB], Suggestions:[]}
end par

note over Caller: IA ✓ IB ✓ (1/1 vote)\nadmittedAt = time.Now()

note over Caller: No stable voter (AdmittedAt was just set)\nPhase 2 skipped → Phase 1 kept directly

== Pool replacement ==
Caller -> Caller: replaceStaticIndexers(pool={IA,IB}, admittedAt)\nStaticIndexerMeta[IA].AdmittedAt = time.Now()\nStaticIndexerMeta[IB].AdmittedAt = time.Now()

note over Caller: Pool upgraded in the shared StaticIndexers map.\nThe existing heartbeat goroutine (started in seed mode)\npicks up the new members on its next tick (20s).\nNo new goroutine is created.\nIsStableVoter() becomes true after MinStableAge (2min).\nD20 (seed circularity) eliminated.

@enduml
55
docs/diagrams/19_failure_indexer_crash.puml
Normal file
@@ -0,0 +1,55 @@
@startuml failure_indexer_crash
title Indexer failure → replenish from a native

participant "Node" as N
participant "Indexer A (alive)" as IA
participant "Indexer B (crashed)" as IB
participant "Native A" as NA
participant "Native B" as NB

note over N: Active pool: Indexers = [IA, IB]\nLong-lived heartbeats active to IA & IB

== IB failure ==
IB ->x N: heartbeat fails (sendHeartbeat err)
note over N: doTick() in SendHeartbeat flags the failure\n→ delete(Indexers[IB])\n→ delete(IndexerMeta[IB])\nThe single heartbeat goroutine keeps running

N -> N: go replenishIndexersFromNative(need=1)

note over N: Pool reduced to 1 indexer.\nReplenish triggered in a goroutine.

== Replenish from the natives ==
par Fetch pool (timeout 6s)
N -> NA: GET /opencloud/native/indexers/1.0\nGetIndexersRequest{Count: max}
NA -> NA: reachableLiveIndexers()\n(IB absent because of an expired heartbeat)
NA --> N: GetIndexersResponse{Indexers:[IA,IC], FillRates:{IA:0.4,IC:0.2}}
else
N -> NB: GET /opencloud/native/indexers/1.0
NB --> N: GetIndexersResponse{Indexers:[IA,IC]}
end par

note over N: Merge + dedup → candidates = [IA, IC]\n(IA already in the pool → IC is the new candidate)

par Phase 1 consensus (timeout 4s)
N -> NA: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IC]}
NA --> N: ConsensusResponse{Trusted:[IA,IC]}
else
N -> NB: /opencloud/native/consensus/1.0
NB --> N: ConsensusResponse{Trusted:[IA,IC]}
end par

note over N: IC → 2/2 votes → admitted\nadmittedAt = time.Now()

par Phase 2 — liveness vote (if stable voters exist)
N -> IA: /opencloud/indexer/consensus/1.0\nIndexerConsensusRequest{Candidates:[IC]}
IA -> IA: StreamRecords[ProtocolHB][IC]\nLastSeen ≤ 120s && LastScore ≥ 30
IA --> N: IndexerConsensusResponse{Alive:[IC]}
end par

note over N: IC confirmed alive → added to the pool

N -> N: replaceStaticIndexers(pool={IA,IC})
N -> IC: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)

note over N: Pool restored to 2 indexers.

@enduml
51
docs/diagrams/20_failure_indexers_native_falback.puml
Normal file
@@ -0,0 +1,51 @@
@startuml failure_indexers_native_falback
title Indexer failures → native IsSelfFallback

participant "Node" as N
participant "Indexer A (crashed)" as IA
participant "Indexer B (crashed)" as IB
participant "Native A" as NA
participant "Native B" as NB

note over N: Active pool: Indexers = [IA, IB]

== Successive failures of IA & IB ==
IA ->x N: heartbeat failure (sendHeartbeat err)
IB ->x N: heartbeat failure (sendHeartbeat err)

note over N: doTick() in SendHeartbeat flags the failures\n→ delete(StaticIndexers[IA]), delete(StaticIndexers[IB])\n→ delete(StaticIndexerMeta[IA/IB])\nThe single heartbeat goroutine keeps running.

N -> N: go replenishIndexersFromNative(need=2)

== Replenish attempt — natives switch to self-fallback mode ==
par Fetch from the natives (timeout 6s)
N -> NA: GET /opencloud/native/indexers/1.0
NA -> NA: reachableLiveIndexers() → 0 alive indexers\nFallback: includes itself (IsSelfFallback=true)
NA --> N: GetIndexersResponse{Indexers:[NA_addr], IsSelfFallback:true}
else
N -> NB: GET /opencloud/native/indexers/1.0
NB --> N: GetIndexersResponse{Indexers:[NB_addr], IsSelfFallback:true}
end par

note over N: isFallback=true → resolvePool skips consensus\nadmittedAt = time.Time{} (zero)\nStaticIndexers = {NA_addr} (native as fallback)

N -> NA: SendHeartbeat /opencloud/heartbeat/1.0\n(native acting as a temporary fallback indexer)

note over NA: responsiblePeers[N] registered.\nrunOffloadLoop watches for real indexers.

== IA recovery → native-side runOffloadLoop ==
IA -> NA: /opencloud/native/subscribe/1.0\nIndexerRegistration{FillRate: 0}
note over NA: liveIndexers[IA] updated.\nrunOffloadLoop sees a real indexer available\nand migrates N from the native to IA.

== Replenish on the next heartbeat tick ==
N -> NA: GET /opencloud/native/indexers/1.0
NA --> N: GetIndexersResponse{Indexers:[IA], IsSelfFallback:false}

note over N: isFallback=false → classic Phase 1 + Phase 2

N -> N: replaceStaticIndexers(pool={IA}, admittedAt)
N -> IA: SendHeartbeat /opencloud/heartbeat/1.0

note over N: Pool restored. The native steps out of the indexer role.

@enduml
46
docs/diagrams/21_failure_native_one_down.puml
Normal file
@@ -0,0 +1,46 @@
@startuml failure_native_one_down
title Native failure, with one still alive

participant "Indexer A" as IA
participant "Indexer B" as IB
participant "Native A (crashed)" as NA
participant "Native B (alive)" as NB
participant "Node" as N

note over IA, NB: Nominal state: IA and IB heartbeat to NA & NB

== Native A failure ==
NA ->x IA: stream reset
NA ->x IB: stream reset
NA ->x N: stream reset (heartbeat Node → NA)

== Indexer side: replenishNativesFromPeers ==
note over IA: SendHeartbeat(NA) detects the reset\nAfterDelete(NA)\nStaticNatives = [NB] (still 1)

IA -> IA: replenishNativesFromPeers()\nphase 1: fetchNativeFromNatives

IA -> NB: GET /opencloud/native/peers/1.0
NB --> IA: GetPeersResponse{Peers:[NC]} /' a new native if one is known '/

alt NC available
IA -> NC: SendHeartbeat /opencloud/heartbeat/1.0\nSubscribe /opencloud/native/subscribe/1.0
note over IA: StaticNatives = [NB, NC]\nNative pool restored.
else No native peer
IA -> IA: fetchNativeFromIndexers()\nAsk the other indexers for their natives
IB --> IA: GetNativesResponse{Natives:[]} /' IB also only knows NB '/
note over IA: No second native found.\nStaticNatives = [NB] (degraded but alive).
end

== Node side: indexer pool alive ==
note over N: The node heartbeats to IA & IB.\nThe NA failure does not affect the indexer pool.\nFuture consensus uses NB alone (1/1 vote = quorum OK).

N -> NB: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IB]}
NB --> N: ConsensusResponse{Trusted:[IA,IB]}
note over N: Consensus with 1/1 alive natives → admit.\nThe consensus floor auto-downgrades to the alive majority.

== NB side: heartbeat to NA fails ==
note over NB: EnsureNativePeers / SendHeartbeat to NA\nfails (sendHeartbeat err)\n→ delete(StaticNatives[NA])\nreplenishNativesFromPeers(NA) triggered

note over NB: Native mesh reduced to NB alone.\nDegraded but functional.

@enduml
60
docs/diagrams/22_failure_both_natives.puml
Normal file
@@ -0,0 +1,60 @@
@startuml 22_failure_both_natives
title F4 — Both natives down → pre-validated fallback pool

participant "Node" as N
participant "Indexer A\n(alive)" as IA
participant "Indexer B\n(alive)" as IB
participant "Native A\n(crashed)" as NA
participant "Native B\n(crashed)" as NB

note over N: Active pool: StaticIndexers = [IA, IB]\nStaticNatives = [NA, NB]\nAdmittedAt[IA] and AdmittedAt[IB] set (stable)

== Simultaneous failure of NA and NB ==
NA ->x N: stream reset
NB ->x N: stream reset

N -> N: AfterDelete(NA) + AfterDelete(NB)\nStaticNatives = {} (empty)

== replenishNativesFromPeers (no result) ==
N -> N: fetchNativeFromNatives() → no native alive
N -> IA: GET /opencloud/indexer/natives/1.0
IA --> N: GetNativesResponse{Natives:[NA,NB]}
note over N: NA and NB known but unreachable.\nNo new native found.

== Fallback: indexer pool kept ==
note over N: isFallback = true\nStaticIndexers kept as-is [IA, IB]\n(last validated pool with AdmittedAt != zero)\nRisk D19 mitigated: native quorum = 0 → fallback accepted

note over N: IA and IB heartbeats continue normally.\nIndexer pool operational without natives.

N -> IA: SendHeartbeat /opencloud/heartbeat/1.0 (continues)
N -> IB: SendHeartbeat /opencloud/heartbeat/1.0 (continues)

== retryLostNative (30s ticker) ==
loop every 30s
N -> N: retryLostNative()\ntries to reconnect NA and NB
N -> NA: dial (fails)
N -> NB: dial (fails)
note over N: Retry without result.\nIndexer pool kept in fallback.
end

== Natives recover ==
NA -> NA: restart
NB -> NB: restart

N -> NA: dial (succeeds)
N -> NA: SendHeartbeat /opencloud/heartbeat/1.0
N -> NB: SendHeartbeat /opencloud/heartbeat/1.0
note over N: StaticNatives = [NA, NB] restored\nisFallback = false

== Indexer pool re-consensus (optional) ==
par Phase 1 consensus
N -> NA: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IB]}
NA --> N: ConsensusResponse{Trusted:[IA,IB]}
else
N -> NB: /opencloud/native/consensus/1.0
NB --> N: ConsensusResponse{Trusted:[IA,IB]}
end par

note over N: Pool [IA,IB] reconfirmed.\nisFallback = false. AdmittedAt[IA,IB] refreshed.

@enduml
63
docs/diagrams/23_failure_native_plus_indexer.puml
Normal file
@@ -0,0 +1,63 @@
@startuml 23_failure_native_plus_indexer
title F5 — Combined failure: 1 native + 1 indexer

participant "Node" as N
participant "Indexer A\n(alive)" as IA
participant "Indexer B\n(crashed)" as IB
participant "Native A\n(alive)" as NA
participant "Native B\n(crashed)" as NB

note over N: Nominal pool: StaticIndexers=[IA,IB], StaticNatives=[NA,NB]

== Simultaneous failures NB + IB ==
NB ->x N: stream reset
IB ->x N: stream reset

N -> N: AfterDelete(NB) — StaticNatives = [NA]
N -> N: AfterDelete(IB) — StaticIndexers = [IA]

== Native replenish (1 alive) ==
N -> N: replenishNativesFromPeers()
N -> NA: GET /opencloud/native/peers/1.0
NA --> N: GetPeersResponse{Peers:[]} /' NB was the only peer, now gone '/
note over N: No alternative native.\nStaticNatives = [NA] — degraded.

== Indexer replenish from NA ==
par Fetch pool (timeout 6s)
N -> NA: GET /opencloud/native/indexers/1.0
NA -> NA: reachableLiveIndexers()\n(IB absent — heartbeat expired)
NA --> N: GetIndexersResponse{Indexers:[IA,IC], FillRates:{IA:0.5,IC:0.3}}
end par

note over N: candidates = [IA, IC]

par Phase 1 consensus — single alive native (timeout 3s)
N -> NA: /opencloud/native/consensus/1.0\nConsensusRequest{Candidates:[IA,IC]}
NA --> N: ConsensusResponse{Trusted:[IA,IC]}
end par

note over N: IC → 1/1 vote → admitted (quorum over the alive)\nadmittedAt = time.Now()

par Phase 2 liveness vote
N -> IA: /opencloud/indexer/consensus/1.0\nIndexerConsensusRequest{Candidates:[IC]}
IA -> IA: StreamRecords[ProtocolHB][IC]\nLastSeen ≤ 120s && LastScore ≥ 30
IA --> N: IndexerConsensusResponse{Alive:[IC]}
end par

N -> N: replaceStaticIndexers(pool={IA,IC})
N -> IC: SendHeartbeat /opencloud/heartbeat/1.0

note over N: Pool restored to [IA,IC].\nDegraded mode: only 1 native.\nretryLostNative(NB) active (30s ticker).

== retryLostNative for NB ==
loop every 30s
N -> NB: dial (fails)
end

NB -> NB: restart
NB -> NA: heartbeat (native mesh rebuilt)
N -> NB: dial (succeeds)
N -> NB: SendHeartbeat /opencloud/heartbeat/1.0
note over N: StaticNatives = [NA,NB] restored.\nBack to nominal mode.

@enduml
45
docs/diagrams/24_failure_retry_lost_native.puml
Normal file
@@ -0,0 +1,45 @@
@startuml 24_failure_retry_lost_native
title F6 — retryLostNative: native reconnection after a network outage

participant "Node / Indexer" as Caller
participant "Native A\n(alive)" as NA
participant "Native B\n(unstable network)" as NB

note over Caller: StaticNatives = [NA, NB]\nActive heartbeats to NA and NB

== Transient network outage toward NB ==
NB ->x Caller: stream reset (network timeout)

Caller -> Caller: AfterDelete(NB)\nStaticNatives = [NA]\nlostNatives.Store(NB.addr)

== replenishNativesFromPeers — phase 1 ==
Caller -> NA: GET /opencloud/native/peers/1.0
NA --> Caller: GetPeersResponse{Peers:[NB]}

note over Caller: NB known to NA, direct reconnection attempted

Caller -> NB: dial (fails — network still down)
note over Caller: Connection impossible.\nFalling back to retryLostNative()

== retryLostNative: 30s ticker ==
loop every 30s while NB is absent
Caller -> Caller: retryLostNative()\nwalks lostNatives
Caller -> NB: StartNativeRegistration (dial + heartbeat + subscribe)
NB --> Caller: dial fails
note over Caller: Retry logged. Next attempt in 30s.
end

== Network restored ==
note over NB: Network restored\nNB reachable again

Caller -> NB: StartNativeRegistration\ndial (succeeds)
Caller -> NB: SendHeartbeat /opencloud/heartbeat/1.0 (long-lived goroutine)
Caller -> NB: /opencloud/native/subscribe/1.0\nIndexerRegistration{FillRate: fillRateFn()}

NB --> Caller: subscribe ack

Caller -> Caller: lostNatives.Delete(NB.addr)\nStaticNatives = [NA, NB] restored

note over Caller: Back to nominal mode.\nnativeHeartbeatOnce not used (a goroutine is already active for NA).\nA new SendHeartbeat goroutine is started for NB only.

@enduml
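A minimal sketch of the retry pattern above: lost native addresses kept in a `sync.Map` (as the diagram's `lostNatives.Store`/`Delete` calls suggest) and retried on a 30s ticker. The `dial` function stands in for the real StartNativeRegistration call and is purely illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var lostNatives sync.Map // addr (string) → struct{}

// dial stands in for StartNativeRegistration (dial + heartbeat + subscribe).
func dial(addr string) error {
	return errors.New("network still down") // illustrative failure
}

// retryLostNative walks the lost set and removes entries that reconnect.
func retryLostNative() {
	lostNatives.Range(func(key, _ any) bool {
		addr := key.(string)
		if err := dial(addr); err != nil {
			fmt.Println("retry failed:", addr, err) // next attempt in 30s
			return true
		}
		lostNatives.Delete(addr) // restored
		return true
	})
}

func main() {
	lostNatives.Store("/ip4/10.0.0.2/tcp/4002", struct{}{})
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	retryLostNative() // in the service this runs on each ticker tick
}
```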
35
docs/diagrams/25_failure_node_gc.puml
Normal file
@@ -0,0 +1,35 @@
@startuml 25_failure_node_gc
title F7 — Node crash → indexer GC + AfterDelete

participant "Node\n(crashed)" as N
participant "Indexer A" as IA
participant "Indexer B" as IB

note over N, IB: Nominal state: N was heartbeating to IA and IB

== Node crash ==
N ->x IA: stream reset (heartbeat cut)
N ->x IB: stream reset (heartbeat cut)

== GC on Indexer A's side ==
note over IA: HandleHeartbeat: stream reset detected\nStreamRecords[ProtocolHeartbeat][N].Expiry frozen

loop GC ticker (30s) — StartGC(30*time.Second)
IA -> IA: gc()\nnow.After(Expiry) where Expiry = lastHBTime + 2min\n→ 2min without a heartbeat → eviction
IA -> IA: delete(StreamRecords[ProtocolHeartbeat][N])\nAfterDelete(N, name, did) called outside the lock
note over IA: N removed from the live registry.\nFillRate recomputed: (n-1) / MaxNodesConn()
end

== Fill-rate impact ==
note over IA: FillRate drops.\nThe next BuildHeartbeatResponse\nincludes the updated FillRate.\nIf the fill rate falls back under 80%:\n→ offload.inBatch and alreadyTried are reset.

== GC on Indexer B's side ==
note over IB: Same GC performed.\nN removed from StreamRecords[ProtocolHeartbeat].

== Possible node reconnection ==
N -> N: restart
N -> IA: SendHeartbeat /opencloud/heartbeat/1.0\nHeartbeat{name, PeerID_N, IndexersBinded, need, record}
IA -> IA: HandleHeartbeat → UptimeTracker(FirstSeen=now)\nStreamRecords[ProtocolHeartbeat][N] recreated\nPeerRecord N republished in the DHT
note over IA: N is back with a fresh FirstSeen.\ndynamicMinScore stays high while age < 24h.\n(grace period: 2 ticks before scoring)

@enduml
88
docs/diagrams/README.md
Normal file
@@ -0,0 +1,88 @@
# OC-Discovery — Architecture and sequence diagrams

All files are in [PlantUML](https://plantuml.com/) format.
They can be rendered with VS Code (PlantUML extension), IntelliJ, or [plantuml.com/plantuml](https://www.plantuml.com/plantuml/uml/).

> **Note:** Diagrams 06, 07, 12, 14–24 and several protocols below
> covered the 3-tier architecture (node → indexer → native indexer),
> removed in the `feature/no_native_consortium` branch. These files are
> kept for historical reference. The active diagrams are listed below.

## Active diagrams (2-tier architecture)

### Main sequences

| File | Description |
|---------|-------------|
| `01_node_init.puml` | Node initialization: libp2p host + PSK + ConnectionGater + ConnectToIndexers + SendHeartbeat + proactive DHT |
| `02_node_claim.puml` | Node registration: `claimInfo` + `publishPeerRecord` → indexers → DHT |
| `03_indexer_heartbeat.puml` | Bidirectional heartbeat protocol: PeerID + DHT + witness challenges, 7-dimension scoring, suggestions, SuggestMigrate |
| `04_indexer_publish.puml` | Publishing a `PeerRecord` to the indexer → DHT (PutValue /node, /name, /pid) |
| `05_indexer_get.puml` | Peer resolution: `GetPeerRecord` → indexer → DHT if not found locally |
| `08_nats_create_resource.puml` | NATS `CREATE_RESOURCE` handler: on-demand partner propagation |
| `09_nats_propagation.puml` | NATS `PROPALGATION_EVENT` handler: delete, considers, planner, search |
| `10_pubsub_search.puml` | Global gossip search (GossipSub /opencloud/search/1.0) |
| `11_stream_search.puml` | Direct stream search (type `"known"` or `"partner"`) |
| `13_planner_flow.puml` | Planner session (open, exchange, close) |

### Resilience and pool management

| File | Description |
|---------|-------------|
| `hb_failure_evict.puml` | HeartbeatFailure → evictPeer → TriggerConsensus or DHT replenish |
| `hb_last_indexer.puml` | Last-indexer protection → reconnectToSeeds → retryUntilSeedResponds |
| `dht_discovery.puml` | Proactive DHT discovery: Provide/FindProviders, SelectByFillRate, dhtCache |
| `connection_gater.puml` | ConnectionGater: DB blacklist → sequential DHT check (transport-error fallthrough) |

## Historical diagrams (3-tier architecture — obsolete)

These files document the former architecture. They no longer match
the production code.

| File | Description |
|---------|-------------|
| `06_native_registration.puml` | Indexer registration with the Native (removed) |
| `07_native_get_consensus.puml` | `ConnectToNatives`: fetch pool + Phase 1 + Phase 2 (removed) |
| `12_partner_heartbeat.puml` | Permanent partner heartbeat (removed — on-demand connections) |
| `14_native_offload_gc.puml` | Native Indexer background loops (removed) |
| `15_archi_config_nominale.puml` | Nominal topology with natives (obsolete) |
| `16_archi_config_seed.puml` | Seed mode without a native (obsolete) |
| `17_startup_consensus_phase1_phase2.puml` | Startup with native consensus (removed) |
| `18_startup_seed_discovers_native.puml` | Seed → native upgrade (removed) |
| `19_failure_indexer_crash.puml` | F1 — replenish from a native (removed) |
| `20_failure_both_indexers_selfdelegate.puml` | F2 — native IsSelfFallback (removed) |
| `21_failure_native_one_down.puml` | F3 — 1 native down (removed) |
| `22_failure_both_natives.puml` | F4 — both natives down (removed) |
| `23_failure_native_plus_indexer.puml` | F5 — combined native + indexer failure (removed) |
| `24_failure_retry_lost_native.puml` | F6 — retryLostNative (removed) |
| `25_failure_node_gc.puml` | F7 — node GC on the indexer side (still valid) |

## Active libp2p protocols

| Protocol | Description |
|-----------|-------------|
| `/opencloud/heartbeat/1.0` | Bidirectional node→indexer heartbeat (long-lived) |
| `/opencloud/probe/1.0` | Bandwidth probe (echo, latency + throughput measurement) |
| `/opencloud/witness/1.0` | Witness query: "what is your score for indexer X?" |
| `/opencloud/record/publish/1.0` | `PeerRecord` publication to an indexer |
| `/opencloud/record/get/1.0` | `GetPeerRecord` query to an indexer |
| `/opencloud/resource/search/1.0` | Resource search between peers |
| `/opencloud/resource/create/1.0` | Resource creation propagation → partner |
| `/opencloud/resource/update/1.0` | Resource update propagation → partner |
| `/opencloud/resource/delete/1.0` | Resource deletion propagation → partner |
| `/opencloud/resource/planner/1.0` | Planner session (booking) |
| `/opencloud/resource/verify/1.0` | Resource signature verification |
| `/opencloud/resource/considers/1.0` | Forwarding of an execution considers |

## Removed protocols (native architecture)

| Protocol | Reason |
|-----------|--------|
| `/opencloud/native/subscribe/1.0` | Native tier removed |
| `/opencloud/native/unsubscribe/1.0` | Native tier removed |
| `/opencloud/native/indexers/1.0` | Replaced by DHT FindProviders |
| `/opencloud/native/consensus/1.0` | Replaced by lightweight TriggerConsensus |
| `/opencloud/native/peers/1.0` | Native tier removed |
| `/opencloud/indexer/natives/1.0` | Native tier removed |
| `/opencloud/indexer/consensus/1.0` | Replaced by TriggerConsensus |
| `/opencloud/resource/heartbeat/partner/1.0` | Partner heartbeat removed — on-demand |
69
docs/diagrams/connection_gater.puml
Normal file
69
docs/diagrams/connection_gater.puml
Normal file
@@ -0,0 +1,69 @@
@startuml connection_gater
title ConnectionGater: admission-time check (InterceptSecured)

participant "Remote Peer\n(inbound)" as Remote
participant "libp2p\nhost A" as Host
participant "OCConnectionGater" as Gater
participant "DB (oc-lib)" as DB
participant "Indexer X\n(reachable)" as IX
participant "Indexer Y\n(unreachable)" as IY

Remote -> Host: inbound connection (post-PSK, post-TLS)
Host -> Gater: InterceptSecured(dir=Inbound, id=RemotePeerID, conn)

alt dir == Outbound
Gater --> Host: true (outbound always allowed)
end

== Step 1: database check ==

Gater -> DB: NewRequestAdmin(PEER).Search(\n  Filter: peer_id = RemotePeerID\n)
DB --> Gater: []peer.Peer

alt found AND relation == BLACKLIST
Gater --> Host: false (refused: blacklisted)
Host ->x Remote: connection closed
end

alt found AND relation != BLACKLIST
Gater --> Host: true (known and not blacklisted)
end

== Step 2: DHT check (peer unknown in DB) ==

note over Gater: Unknown peer → check that it exists\nin the DHT network

Gater -> Gater: getReq = GetValue{PeerID: RemotePeerID}

loop For each indexer (random order: Shuffle)

alt Indexer IY unreachable (transport error)
Gater -> IY: h.Connect(ctxTTL, IY_AddrInfo)
IY -->x Gater: connection failed
note over Gater: reachable=false\n→ try the next one
end

alt Indexer IX reachable
Gater -> IX: h.Connect(ctxTTL, IX_AddrInfo)
IX --> Gater: OK
Gater -> IX: TempStream /opencloud/record/get/1.0
Gater -> IX: stream.Encode(GetValue{PeerID: RemotePeerID})
IX -> IX: local lookup + DHT if absent
IX --> Gater: GetResponse{Found: true/false, Records}
note over Gater: reachable=true → authoritative answer\n(distributed DHT: one indexer suffices)

alt Found == true
Gater --> Host: true (peer known to the network)
else Found == false
Gater --> Host: false (refused: unknown to the network)
Host ->x Remote: connection closed
end
end
end

alt No indexer reachable
note over Gater: Nascent network, or all isolated.\nAllow by default.
Gater --> Host: true
end

@enduml
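A compact Go sketch of the gating decision in the diagram above, under stated assumptions: `lookupPeer` and `queryIndexer` are hypothetical stand-ins for the oc-lib search and the `/opencloud/record/get/1.0` round-trip, and only the admission logic is reproduced.

```go
package gater

import (
	"math/rand"
	"slices"

	"github.com/libp2p/go-libp2p/core/control"
	"github.com/libp2p/go-libp2p/core/network"
	"github.com/libp2p/go-libp2p/core/peer"
	ma "github.com/multiformats/go-multiaddr"
)

type OCConnectionGater struct {
	lookupPeer   func(peer.ID) (found, blacklisted bool)                    // assumption: wraps NewRequestAdmin(PEER).Search
	queryIndexer func(indexer, candidate peer.ID) (reachable, known bool)   // assumption: one record/get round-trip
	indexers     []peer.ID
}

// InterceptSecured runs once the remote identity is authenticated (post-PSK, post-TLS).
func (g *OCConnectionGater) InterceptSecured(dir network.Direction, id peer.ID, _ network.ConnMultiaddrs) bool {
	if dir == network.DirOutbound {
		return true // outbound is always allowed
	}
	// Step 1: local database check; a blacklist entry is an immediate refusal.
	if found, blacklisted := g.lookupPeer(id); found {
		return !blacklisted
	}
	// Step 2: ask indexers in random order; the first reachable one is authoritative.
	idxs := slices.Clone(g.indexers)
	rand.Shuffle(len(idxs), func(i, j int) { idxs[i], idxs[j] = idxs[j], idxs[i] })
	for _, ix := range idxs {
		if reachable, known := g.queryIndexer(ix, id); reachable {
			return known
		}
	}
	return true // no indexer reachable: nascent network, allow by default
}

// The remaining ConnectionGater methods are permissive in this sketch.
func (g *OCConnectionGater) InterceptPeerDial(peer.ID) bool               { return true }
func (g *OCConnectionGater) InterceptAddrDial(peer.ID, ma.Multiaddr) bool { return true }
func (g *OCConnectionGater) InterceptAccept(network.ConnMultiaddrs) bool  { return true }
func (g *OCConnectionGater) InterceptUpgraded(network.Conn) (bool, control.DisconnectReason) {
	return true, 0
}
```

The default-allow fallback mirrors the diagram's rule for a nascent network where no indexer is reachable yet.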
56
docs/diagrams/dht_discovery.puml
Normal file
@@ -0,0 +1,56 @@
@startuml dht_discovery
title DHT discovery: Provide/FindProviders + SelectByFillRate + indexer dhtCache

participant "Indexer A\n(new)" as IA
participant "DHT Network" as DHT
participant "Node B\n(bootstrap)" as NodeB
participant "Indexer A\n(existing)" as IAexist

== Indexer registration in the DHT ==

note over IA: IndexerService startup\nstartDHTProvide(fillRateFn)

IA -> IA: Waits for a routable address (max 60s)\nnon-loopback available

IA -> DHT: DHT.Bootstrap(ctx)\n→ routing table warmup

loop ticker RecommendedHeartbeatInterval (~20s)
IA -> DHT: DHT.Provide(IndexerCID, true)\n← IndexerCID = CID(sha256("/opencloud/indexers"))
note over DHT: The indexer is announced as a provider.\nTTL managed by libp2p-kad-dht.\nAuto-expires if Provide() stops.
end

== Indexer's passive DHT cache ==

note over IA: startDHTCacheRefresh()\nbackground goroutine

IA -> IA: Initial delay 30s (routing table warmup)

loop ticker 2min
IA -> DHT: DiscoverIndexersFromDHT(h, dht, 30)\n← FindProviders(IndexerCID, max=30)
DHT --> IA: []AddrInfo (up to 30 candidates)
IA -> IA: Filters self\nSelectByFillRate(filtered, nil, 10)\n→ /24 diversity, prior f=0.5 (fill rates unknown)
IA -> IA: dhtCache = selected (max 10)\n→ used for Suggestions in BuildHeartbeatResponse
end

== Node-side discovery at bootstrap ==

NodeB -> NodeB: ConnectToIndexers → seeds added\nSendHeartbeat started

NodeB -> NodeB: proactive goroutine (after 5s warmup)

alt discoveryDHT == nil (pure node, no IndexerService)
NodeB -> DHT: initNodeDHT(h, seeds)\n← DHT client mode, bootstrapped on the seeds
end

NodeB -> DHT: DiscoverIndexersFromDHT(h, discoveryDHT, need+extra)
DHT --> NodeB: []AddrInfo candidates

NodeB -> NodeB: Filters self\nSelectByFillRate(candidates, fillRates, need)\n→ weighting w(F) = F×(1-F)\n  F=0.2 → w=0.16\n  F=0.5 → w=0.25 (max)\n  F=0.8 → w=0.16\n→ /24 diversity filter

loop For each retained candidate
NodeB -> NodeB: Indexers.SetAddr(key, &addrInfo)\nNudgeIt() → immediate heartbeat
end

note over NodeB: Pool enriched beyond the seeds.\nScoring starts with the first heartbeat.\nSeeds remain IsSeed=true (stickiness).

@enduml
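A hedged sketch of the Provide/FindProviders rendezvous and the fill-rate prior shown above; `provideLoop`, `discoverIndexers`, and `weight` are illustrative names, and `interval` stands in for RecommendedHeartbeatInterval:

```go
package discovery

import (
	"context"
	"time"

	"github.com/ipfs/go-cid"
	dht "github.com/libp2p/go-libp2p-kad-dht"
	"github.com/libp2p/go-libp2p/core/peer"
	"github.com/multiformats/go-multihash"
)

// indexerCID derives the shared rendezvous key: CID(sha256("/opencloud/indexers")).
func indexerCID() (cid.Cid, error) {
	mh, err := multihash.Sum([]byte("/opencloud/indexers"), multihash.SHA2_256, -1)
	if err != nil {
		return cid.Undef, err
	}
	return cid.NewCidV1(cid.Raw, mh), nil
}

// provideLoop re-announces this indexer under the rendezvous CID so its
// provider record never expires while the service is alive.
func provideLoop(ctx context.Context, d *dht.IpfsDHT, key cid.Cid, interval time.Duration) {
	t := time.NewTicker(interval)
	defer t.Stop()
	for {
		_ = d.Provide(ctx, key, true) // transient failures are retried on the next tick
		select {
		case <-ctx.Done():
			return
		case <-t.C:
		}
	}
}

// discoverIndexers collects up to max provider records for the rendezvous CID.
func discoverIndexers(ctx context.Context, d *dht.IpfsDHT, key cid.Cid, max int) []peer.AddrInfo {
	var out []peer.AddrInfo
	for ai := range d.FindProvidersAsync(ctx, key, max) {
		if len(ai.Addrs) > 0 {
			out = append(out, ai)
		}
	}
	return out
}

// weight is the selection prior from the diagram: w(F) = F×(1-F),
// maximal at F = 0.5 (w(0.2) = w(0.8) = 0.16, w(0.5) = 0.25).
func weight(fillRate float64) float64 { return fillRate * (1 - fillRate) }
```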
41
docs/diagrams/hb_failure_evict.puml
Normal file
@@ -0,0 +1,41 @@
@startuml hb_failure_evict
title HeartbeatFailure → evictPeer → TriggerConsensus or DHT replenish

participant "Node A" as NodeA
participant "Indexer X\n(failing)" as IX
participant "Indexer Y\n(voter)" as IY
participant "Indexer Z\n(voter)" as IZ
participant "DHT" as DHT
participant "Indexer NEW\n(candidate)" as INEW

note over NodeA: SendHeartbeat tick: Indexer X is in the pool

NodeA -> IX: stream.Encode(Heartbeat{...})
IX -->x NodeA: timeout / transport error

NodeA -> NodeA: HeartbeatFailure(h, proto, dir, addr_X, info_X, isIndexerHB=true, maxPool)

NodeA -> NodeA: evictPeer(dir, addr_X, id_X, proto)\n→ Streams.Delete(proto, &id_X)\n→ DeleteAddr(addr_X)\n→ DeleteScore(addr_X)\n→ voters = remaining AddrInfos

NodeA -> NodeA: poolSize = len(dir.GetAddrs())

alt poolSize == 0
NodeA -> NodeA: reconnectToSeeds()\n→ re-injects IndexerAddresses (IsSeed=true)
alt seeds added
NodeA -> NodeA: need = maxPool\nNudgeIt() → immediate tick
else no seeds configured or seeds unreachable
NodeA -> NodeA: go retryUntilSeedResponds()\n(backoff 10s→5min, panic if IndexerAddresses is empty)
end
else poolSize > 0 AND len(voters) > 0
NodeA -> NodeA: go TriggerConsensus(h, voters, need)
NodeA -> IY: stream GET → GetValue{Key: candidate_DID}
IY --> NodeA: GetResponse{Found, Records}
NodeA -> IZ: stream GET → GetValue{Key: candidate_DID}
IZ --> NodeA: GetResponse{Found, Records}
note over NodeA: Quorum check:\nfound=true AND lastSeen ≤ 2×interval\nAND lastScore ≥ 30\n→ majority → admit INEW
NodeA -> NodeA: Indexers.SetAddr(addr_NEW, &INEW_AddrInfo)\nIndexers.SetScore(addr_NEW, Score{IsSeed:false})\nNudgeIt()
else poolSize > 0 AND len(voters) == 0
NodeA -> DHT: go replenishIndexersFromDHT(h, need)\nDiscoverIndexersFromDHT → SelectByFillRate\n→ add to Indexers Directory
end

@enduml
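The quorum rule from the note above, as a small self-contained sketch; the `Vote` struct, the 2×interval freshness window, and the 30-point score floor mirror the diagram and are assumptions about the real types:

```go
package consensus

import "time"

// Vote is one indexer's answer to GetValue{Key: candidate_DID}.
type Vote struct {
	Found     bool
	LastSeen  time.Time
	LastScore int
}

// admit applies the quorum check from the diagram: a vote counts when the
// candidate was found, seen within 2×interval, and scored at least 30;
// a strict majority of voters admits the candidate.
func admit(votes []Vote, interval time.Duration, now time.Time) bool {
	ok := 0
	for _, v := range votes {
		if v.Found && now.Sub(v.LastSeen) <= 2*interval && v.LastScore >= 30 {
			ok++
		}
	}
	return len(votes) > 0 && ok*2 > len(votes)
}
```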
46
docs/diagrams/hb_last_indexer.puml
Normal file
@@ -0,0 +1,46 @@
@startuml hb_last_indexer
title Last-indexer protection → reconnectToSeeds → retryUntilSeedResponds

participant "Node A" as NodeA
participant "Indexer LAST\n(last one left)" as IL
participant "Seed Indexer\n(config)" as SEED
participant "DHT" as DHT

note over NodeA: Pool = 1 indexer (LAST)\nIsSeed=false, score low for a long time

== Score-based eviction attempt ==
NodeA -> NodeA: score < minScore\nAND TotalOnline ≥ 2×interval\nAND !IsSeed\nAND len(pool) > 1 ← FALSE: pool == 1

note over NodeA: Guard active: len(pool) == 1\n→ score-based eviction BLOCKED\nLAST stays in the pool

== Network outage (heartbeat failure) ==
NodeA -> IL: stream.Encode(Heartbeat{...})
IL -->x NodeA: timeout

NodeA -> NodeA: HeartbeatFailure → evictPeer(LAST)\npoolSize = 0

NodeA -> NodeA: reconnectToSeeds()\n→ parses IndexerAddresses (conf)\n→ SetAddr + SetScore(IsSeed=true) for each seed

alt seeds added (IndexerAddresses non-empty)
NodeA -> NodeA: NudgeIt() → immediate tick
NodeA -> SEED: Heartbeat{...} (via SendHeartbeat nudge)
SEED --> NodeA: HeartbeatResponse{fillRate, ...}
note over NodeA: Pool restored via the seeds.\nProactive DHT discovery resumes.

else IndexerAddresses empty
NodeA -> NodeA: go retryUntilSeedResponds()
note over NodeA: immediate panic:\n"pool is empty and no seed indexers configured"\n→ process stops
end

== retryUntilSeedResponds (if seeds unresponsive) ==
loop exponential backoff (10s → 20s → ... → 5min)
NodeA -> NodeA: time.Sleep(backoff)
NodeA -> NodeA: len(Indexers.GetAddrs()) > 0?\n→ yes: return (someone refilled the pool)
NodeA -> NodeA: reconnectToSeeds()
alt pool > 0 after reconnect
NodeA -> NodeA: NudgeIt()\nDHT.Bootstrap(ctx, 15s)
note over NodeA: Exits the loop.\nNormal heartbeat resumes.
end
end

@enduml
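Finally, the retry loop at the bottom of this diagram, sketched with hypothetical `poolSize` and `reconnectToSeeds` callbacks; the 10s→5min exponential backoff matches the diagram:

```go
package discovery

import (
	"context"
	"time"
)

// retryUntilSeedResponds keeps trying the configured seeds with exponential
// backoff until the pool is refilled, either by a successful reconnect or
// by another goroutine (e.g. a DHT replenish).
func retryUntilSeedResponds(ctx context.Context, poolSize func() int, reconnectToSeeds func() int) {
	backoff := 10 * time.Second
	const maxBackoff = 5 * time.Minute
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(backoff):
		}
		if poolSize() > 0 {
			return // someone else refilled the pool
		}
		if reconnectToSeeds() > 0 {
			return // a seed answered; normal heartbeat resumes via NudgeIt
		}
		backoff *= 2
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}
```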
62
go.mod
@@ -1,65 +1,73 @@
module oc-discovery

go 1.24.6
go 1.25.0

require (
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150531-ef916fe2d995
github.com/beego/beego v1.12.13
github.com/beego/beego/v2 v2.3.8
github.com/go-redis/redis v6.15.9+incompatible
github.com/smartystreets/goconvey v1.7.2
github.com/tidwall/gjson v1.17.3
cloud.o-forge.io/core/oc-lib v0.0.0-20260423081613-747368c79a13
github.com/ipfs/go-cid v0.6.0
github.com/libp2p/go-libp2p v0.47.0
github.com/libp2p/go-libp2p-record v0.3.1
github.com/multiformats/go-multiaddr v0.16.1
github.com/multiformats/go-multihash v0.2.3
)

require (
github.com/beego/beego/v2 v2.3.8 // indirect
github.com/benbjohnson/clock v1.3.5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect
github.com/dunglas/httpsfv v1.1.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/filecoin-project/go-clock v0.1.0 // indirect
github.com/flynn/noise v1.1.0 // indirect
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/google/gnostic-models v0.7.0 // indirect
github.com/google/gopacket v1.1.19 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/huin/goupnp v1.3.0 // indirect
github.com/ipfs/boxo v0.35.2 // indirect
github.com/ipfs/go-cid v0.6.0 // indirect
github.com/ipfs/go-datastore v0.9.0 // indirect
github.com/ipfs/go-log/v2 v2.9.1 // indirect
github.com/ipld/go-ipld-prime v0.21.0 // indirect
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/koron/go-ssdp v0.0.6 // indirect
github.com/libp2p/go-buffer-pool v0.1.0 // indirect
github.com/libp2p/go-cidranger v1.1.0 // indirect
github.com/libp2p/go-flow-metrics v0.3.0 // indirect
github.com/libp2p/go-libp2p v0.47.0 // indirect
github.com/libp2p/go-libp2p-asn-util v0.4.1 // indirect
github.com/libp2p/go-libp2p-kbucket v0.8.0 // indirect
github.com/libp2p/go-libp2p-record v0.3.1 // indirect
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 // indirect
github.com/libp2p/go-msgio v0.3.0 // indirect
github.com/libp2p/go-netroute v0.4.0 // indirect
github.com/libp2p/go-reuseport v0.4.0 // indirect
github.com/libp2p/go-yamux/v5 v5.0.1 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect
github.com/miekg/dns v1.1.68 // indirect
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect
github.com/mr-tron/base58 v1.2.0 // indirect
github.com/multiformats/go-base32 v0.1.0 // indirect
github.com/multiformats/go-base36 v0.2.0 // indirect
github.com/multiformats/go-multiaddr v0.16.1 // indirect
github.com/multiformats/go-multiaddr-dns v0.4.1 // indirect
github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
github.com/multiformats/go-multibase v0.2.0 // indirect
github.com/multiformats/go-multicodec v0.10.0 // indirect
github.com/multiformats/go-multihash v0.2.3 // indirect
github.com/multiformats/go-multistream v0.6.1 // indirect
github.com/multiformats/go-varint v0.1.0 // indirect
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect
@@ -89,6 +97,7 @@ require (
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel v1.39.0 // indirect
go.opentelemetry.io/otel/metric v1.39.0 // indirect
@@ -99,13 +108,28 @@ require (
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.1 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect
golang.org/x/mod v0.32.0 // indirect
golang.org/x/oauth2 v0.32.0 // indirect
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 // indirect
golang.org/x/term v0.39.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.41.0 // indirect
gonum.org/v1/gonum v0.17.0 // indirect
gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
k8s.io/api v0.35.1 // indirect
k8s.io/apimachinery v0.35.1 // indirect
k8s.io/client-go v0.35.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect
lukechampine.com/blake3 v1.4.1 // indirect
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
sigs.k8s.io/yaml v1.6.0 // indirect
)

require (
@@ -117,13 +141,10 @@ require (
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.27.0 // indirect
github.com/golang/snappy v1.0.0 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c // indirect
github.com/google/uuid v1.6.0
github.com/goraz/onion v0.1.3 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/jtolds/gls v4.20.0+incompatible // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/libp2p/go-libp2p-kad-dht v0.37.1
github.com/libp2p/go-libp2p-pubsub v0.15.0
@@ -139,19 +160,14 @@ require (
github.com/prometheus/client_model v0.6.2 // indirect
github.com/prometheus/common v0.66.1 // indirect
github.com/prometheus/procfs v0.17.0 // indirect
github.com/robfig/cron v1.2.0 // indirect
github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/rs/zerolog v1.34.0 // indirect
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 // indirect
github.com/smartystreets/assertions v1.2.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/xdg-go/pbkdf2 v1.0.0 // indirect
github.com/xdg-go/scram v1.1.2 // indirect
github.com/xdg-go/stringprep v1.0.4 // indirect
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 // indirect
go.mongodb.org/mongo-driver v1.17.4 // indirect
golang.org/x/crypto v0.47.0 // indirect
golang.org/x/crypto v0.47.0
golang.org/x/net v0.49.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.40.0 // indirect
360
go.sum
@@ -1,212 +1,139 @@
cloud.o-forge.io/core/oc-lib v0.0.0-20250108155542-0f4adeea86be h1:1Yf8ihUxXjOEPqcfgtXJpJ/slxBUHhf7AgS7DZI3iUk=
cloud.o-forge.io/core/oc-lib v0.0.0-20250108155542-0f4adeea86be/go.mod h1:ya7Q+zHhaKM+XF6sAJ+avqHEVzaMnFJQih2X3TlTlGo=
cloud.o-forge.io/core/oc-lib v0.0.0-20250603080047-03dea551315b h1:yfXDZ0Pw5xTWstsbZWS+MV7G3ZTSvOCTwWQJWRn4Z5k=
cloud.o-forge.io/core/oc-lib v0.0.0-20250603080047-03dea551315b/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
cloud.o-forge.io/core/oc-lib v0.0.0-20250604083300-387785b40cb0 h1:iEm/Rf9I0OSCcncuFy61YOSZ3jdRlhJ/oLD97Pc2pCQ=
cloud.o-forge.io/core/oc-lib v0.0.0-20250604083300-387785b40cb0/go.mod h1:2roQbUpv3a6mTIr5oU1ux31WbN8YucyyQvCQ0FqwbcE=
cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27 h1:iogk6pV3gybzQDBXMI6Qd/jvSA1h+3oRE+vLl1MRjew=
cloud.o-forge.io/core/oc-lib v0.0.0-20250704084459-443546027b27/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260126120055-055e6c70cdd7 h1:LAK86efqe2HNV1Tkym1TpvzL1Xsj3F0ClsK/snfejD0=
cloud.o-forge.io/core/oc-lib v0.0.0-20260126120055-055e6c70cdd7/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260127143728-3c052bf16572 h1:jrUHgs4DqNWLnLcb5nd4lrJim77+aGkJFACUfMogiu8=
cloud.o-forge.io/core/oc-lib v0.0.0-20260127143728-3c052bf16572/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128140632-d098d253d8e2 h1:B3TO9nXdpGuPXL4X3QFrRMJ1C4zXCQlLh4XR9aSZoKg=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128140632-d098d253d8e2/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128140807-1c9d7b63c0b3 h1:zAT4ZulAaX+l28QdCMvuXh5XQxn+fU8x6YNJ1zmA7+Q=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128140807-1c9d7b63c0b3/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128152242-743f4a6ff742 h1:vGlUqBhj3G5hvskL1NzfecKCUMH8bL3xx7JkLpv/04M=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128152242-743f4a6ff742/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128152919-7911cf29def8 h1:HT1+PP04wu5DcQ5PA3LtSJ5PcWEyL4FlZB62+v9eLWo=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128152919-7911cf29def8/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128154447-d26789d64e33 h1:WdmHeRtEWV3RsXaEe4HnItGNYLFvMNFggfq9/KtPho0=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128154447-d26789d64e33/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128160440-c0d89ea9e1e8 h1:h7VHJktaTT8TxO4ld3Xjw3LzMsivr3m7mzbNxb44zes=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128160440-c0d89ea9e1e8/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128162702-97cf629e27ec h1:/uvrtEt7A5rwqFPHH8yjujlC33HMjQHhWDIK6I08DrA=
cloud.o-forge.io/core/oc-lib v0.0.0-20260128162702-97cf629e27ec/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260129121215-c1519f6b26b8 h1:gvUbTwHnYM0Ezzvoa9ylTt+o1lAhS0U79OogbsZ+Pl8=
cloud.o-forge.io/core/oc-lib v0.0.0-20260129121215-c1519f6b26b8/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260129122033-186ba3e689c7 h1:NRFGRqN+j5g3DrtXMYN5T5XSYICG+OU2DisjBdID3j8=
cloud.o-forge.io/core/oc-lib v0.0.0-20260129122033-186ba3e689c7/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203074447-30e6c9a6183c h1:c19lIseiUk5Hp+06EowfEbMWH1pK8AC/hvQ4ryWgJtY=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203074447-30e6c9a6183c/go.mod h1:vHWauJsS6ryf7UDqq8hRXoYD5RsONxcFTxeZPOztEuI=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150123-4258f6b58083 h1:nKiU4AfeX+axS4HkaX8i2PJyhSFfRJvzT+CgIv6Jl2o=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150123-4258f6b58083/go.mod h1:T0UCxRd8w+qCVVC0NEyDiWIGC5ADwEbQ7hFcvftd4Ks=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150531-ef916fe2d995 h1:ZDRvnzTTNHgMm5hYmseHdEPqQ6rn/4v+P9f/JIxPaNw=
cloud.o-forge.io/core/oc-lib v0.0.0-20260203150531-ef916fe2d995/go.mod h1:T0UCxRd8w+qCVVC0NEyDiWIGC5ADwEbQ7hFcvftd4Ks=
cloud.o-forge.io/core/oc-lib v0.0.0-20260423081613-747368c79a13 h1:Qnz5wSliRDl218nK4aCJdPL+rNp55A72UDyUIFDswEQ=
cloud.o-forge.io/core/oc-lib v0.0.0-20260423081613-747368c79a13/go.mod h1:JynnOb3eMr9VZW1mHq+Vsl3tzx6gPhPsGKpQD/dtEBc=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Knetic/govaluate v3.0.0+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk=
github.com/beego/beego v1.12.13 h1:g39O1LGLTiPejWVqQKK/TFGrroW9BCZQz6/pf4S8IRM=
github.com/beego/beego v1.12.13/go.mod h1:QURFL1HldOcCZAxnc1cZ7wrplsYR5dKPHFjmk6WkLAs=
github.com/beego/beego/v2 v2.3.1 h1:7MUKMpJYzOXtCUsTEoXOxsDV/UcHw6CPbaWMlthVNsc=
github.com/beego/beego/v2 v2.3.1/go.mod h1:5cqHsOHJIxkq44tBpRvtDe59GuVRVv/9/tyVDxd5ce4=
github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc=
github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg=
github.com/beego/goyaml2 v0.0.0-20130207012346-5545475820dd/go.mod h1:1b+Y/CofkYwXMUU0OhQqGvsY2Bvgr4j6jfT699wyZKQ=
github.com/beego/x2j v0.0.0-20131220205130-a0352aadc542/go.mod h1:kSeGC/p1AbBiEp5kat81+DSQrZenVBZXklMLaELspWU=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/biter777/countries v1.7.5 h1:MJ+n3+rSxWQdqVJU8eBy9RqcdH6ePPn4PJHocVWUa+Q=
github.com/biter777/countries v1.7.5/go.mod h1:1HSpZ526mYqKJcpT5Ti1kcGQ0L0SrXWIaptUWjFfv2E=
github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737/go.mod h1:PmM6Mmwb0LSuEubjR8N7PtNe1KxZLtOUHtbeikc5h60=
github.com/casbin/casbin v1.7.0/go.mod h1:c67qKN6Oum3UF5Q1+BByfFxkwKvhwW57ITjqwtzR1KE=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/coreos/etcd v3.3.17+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/couchbase/go-couchbase v0.0.0-20201216133707-c04035124b17/go.mod h1:+/bddYDxXsf9qt0xpDUtRR47A2GjaXmGGAqQ/k3GJ8A=
github.com/couchbase/gomemcached v0.1.2-0.20201224031647-c432ccf49f32/go.mod h1:mxliKQxOv84gQ0bJWbI+w9Wxdpt9HjDvgW9MjCym5Vo=
github.com/couchbase/goutils v0.0.0-20210118111533-e33d3ffb5401/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cupcake/rdb v0.0.0-20161107195141-43ba34106c76/go.mod h1:vYwsqCOLxGiisLwp9rITslkFNpZD5rz43tf41QFkTWY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU=
github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
github.com/decred/dcrd/crypto/blake256 v1.1.0/go.mod h1:2OfgNZ5wDpcsFmHmCK5gZTPcCXqlm2ArzUIkw9czNJo=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 h1:NMZiJj8QnKe1LgsbDayM4UoHwbvwDRwnI3hwNaAHRnc=
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40=
github.com/dunglas/httpsfv v1.1.0 h1:Jw76nAyKWKZKFrpMMcL76y35tOpYHqQPzHQiwDvpe54=
github.com/dunglas/httpsfv v1.1.0/go.mod h1:zID2mqw9mFsnt7YC3vYQ9/cjq30q41W+1AnDwH8TiMg=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/elastic/go-elasticsearch/v6 v6.8.5/go.mod h1:UwaDJsD3rWLM5rKNFzv9hgox93HoX8utj1kxD9aFUcI=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/elazarl/go-bindata-assetfs v1.0.1 h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw=
github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/etcd-io/etcd v3.3.17+incompatible/go.mod h1:cdZ77EstHBwVtD6iTgzgvogwcjo9m4iOqoijouPJ4bs=
github.com/filecoin-project/go-clock v0.1.0 h1:SFbYIM75M8NnFm1yMHhN9Ahy3W5bEZV9gd6MPfXbKVU=
github.com/filecoin-project/go-clock v0.1.0/go.mod h1:4uB/O4PvOjlx1VCMdZ9MyDZXRm//gkj1ELEbxfI1AZs=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/gabriel-vasile/mimetype v1.4.5 h1:J7wGKdGu33ocBOhGy0z653k/lFKLFDPJMG8Gql0kxn4=
github.com/gabriel-vasile/mimetype v1.4.5/go.mod h1:ibHel+/kbxn9x2407k1izTA1S81ku1z/DlgOW2QE0M4=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/gabriel-vasile/mimetype v1.4.10 h1:zyueNbySn/z8mJZHLt6IPw0KoZsiQNszIpU+bX4+ZK0=
github.com/gabriel-vasile/mimetype v1.4.10/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/glendc/gopher-json v0.0.0-20170414221815-dc4743023d0c/go.mod h1:Gja1A+xZ9BoviGJNA2E9vFkPjjsl+CoJxSXiQM1UXtw=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao=
github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4=
github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c h1:7lF+Vz0LqiRidnzC1Oq86fpX1q/iEv2KJdrCtttYjT4=
github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/goraz/onion v0.1.3 h1:KhyvbDA2b70gcz/d5izfwTiOH8SmrvV43AsVzpng3n0=
github.com/goraz/onion v0.1.3/go.mod h1:XEmz1XoBz+wxTgWB8NwuvRm4RAu3vKxvrmYtzK+XCuQ=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc=
github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/ipfs/boxo v0.35.2 h1:0QZJJh6qrak28abENOi5OA8NjBnZM4p52SxeuIDqNf8=
github.com/ipfs/boxo v0.35.2/go.mod h1:bZn02OFWwJtY8dDW9XLHaki59EC5o+TGDECXEbe1w8U=
github.com/ipfs/go-block-format v0.2.3 h1:mpCuDaNXJ4wrBJLrtEaGFGXkferrw5eqVvzaHhtFKQk=
github.com/ipfs/go-block-format v0.2.3/go.mod h1:WJaQmPAKhD3LspLixqlqNFxiZ3BZ3xgqxxoSR/76pnA=
github.com/ipfs/go-cid v0.6.0 h1:DlOReBV1xhHBhhfy/gBNNTSyfOM6rLiIx9J7A4DGf30=
github.com/ipfs/go-cid v0.6.0/go.mod h1:NC4kS1LZjzfhK40UGmpXv5/qD2kcMzACYJNntCUiDhQ=
github.com/ipfs/go-datastore v0.9.0 h1:WocriPOayqalEsueHv6SdD4nPVl4rYMfYGLD4bqCZ+w=
github.com/ipfs/go-datastore v0.9.0/go.mod h1:uT77w/XEGrvJWwHgdrMr8bqCN6ZTW9gzmi+3uK+ouHg=
github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk=
github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps=
github.com/ipfs/go-log/v2 v2.9.1 h1:3JXwHWU31dsCpvQ+7asz6/QsFJHqFr4gLgQ0FWteujk=
github.com/ipfs/go-log/v2 v2.9.1/go.mod h1:evFx7sBiohUN3AG12mXlZBw5hacBQld3ZPHrowlJYoo=
github.com/ipfs/go-test v0.2.3 h1:Z/jXNAReQFtCYyn7bsv/ZqUwS6E7iIcSpJ2CuzCvnrc=
github.com/ipfs/go-test v0.2.3/go.mod h1:QW8vSKkwYvWFwIZQLGQXdkt9Ud76eQXRQ9Ao2H+cA1o=
github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E=
github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk=
github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/koron/go-ssdp v0.0.6 h1:Jb0h04599eq/CY7rB5YEqPS83HmRfHP2azkxMN2rFtU=
github.com/koron/go-ssdp v0.0.6/go.mod h1:0R9LfRJGek1zWTjN3JUNlm5INCDYGpRDfAptnct63fI=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -216,10 +143,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/ledisdb/ledisdb v0.0.0-20200510135210-d35789ec47e6/go.mod h1:n931TsDuKuq+uX4v1fulaMbA/7ZLLhjc85h7chZGBCQ=
github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8=
github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg=
github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c=
@@ -240,6 +165,8 @@ github.com/libp2p/go-libp2p-record v0.3.1 h1:cly48Xi5GjNw5Wq+7gmjfBiG9HCzQVkiZOU
github.com/libp2p/go-libp2p-record v0.3.1/go.mod h1:T8itUkLcWQLCYMqtX7Th6r7SexyUJpIyPgks757td/E=
github.com/libp2p/go-libp2p-routing-helpers v0.7.5 h1:HdwZj9NKovMx0vqq6YNPTh6aaNzey5zHD7HeLJtq6fI=
github.com/libp2p/go-libp2p-routing-helpers v0.7.5/go.mod h1:3YaxrwP0OBPDD7my3D0KxfR89FlcX/IEbxDEDfAmj98=
github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA=
github.com/libp2p/go-libp2p-testing v0.12.0/go.mod h1:KcGDRXyN7sQCllucn1cOOS+Dmm7ujhfEyXQL5lvkcPg=
github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0=
github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM=
github.com/libp2p/go-netroute v0.4.0 h1:sZZx9hyANYUx9PZyqcgE/E1GUG3iEtTZHUEvdtXT7/Q=
@@ -249,9 +176,12 @@ github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8S
github.com/libp2p/go-yamux/v5 v5.0.1 h1:f0WoX/bEF2E8SbE4c/k1Mo+/9z0O4oC/hWEA+nfYRSg=
github.com/libp2p/go-yamux/v5 v5.0.1/go.mod h1:en+3cdX51U0ZslwRdRLrvQsdayFt3TSUKvBGErzpWbU=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/marcopolo/simnet v0.0.4 h1:50Kx4hS9kFGSRIbrt9xUS3NJX33EyPqHVmpXvaKLqrY=
github.com/marcopolo/simnet v0.0.4/go.mod h1:tfQF1u2DmaB6WHODMtQaLtClEf3a296CKQLq5gAsIS0=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk=
github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
@@ -259,10 +189,9 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8=
github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc=
github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKoFL8DUUmalo2yJJUCxbPKtm8OKfqr2/FTNU=
@@ -276,9 +205,13 @@ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.7.1 h1:etflOAAHORrCC44V+aR6Ftzort912ZU+YLiSTuV8eaE=
github.com/montanaflynn/stats v0.7.1/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
@@ -308,29 +241,21 @@ github.com/multiformats/go-varint v0.1.0 h1:i2wqFp4sdl3IcIxfAonHQV9qU5OsZ4Ts9IOo
github.com/multiformats/go-varint v0.1.0/go.mod h1:5KVAVXegtfmNQQm/lCY+ATvDzvJJhSkUlGQV9wgObdI=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nats.go v1.43.0 h1:uRFZ2FEoRvP64+UUhaTokyS18XBCR/xM2vQZKO4i8ug=
github.com/nats-io/nats.go v1.43.0/go.mod h1:iRWIPokVIFbVijxuMQq4y9ttaBTMe0SFdlZfMDd+33g=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
github.com/nats-io/nkeys v0.4.11 h1:q44qGV008kYd9W1b1nEBkNzvnWxtRSQ7A8BoqRrcfa0=
github.com/nats-io/nkeys v0.4.11/go.mod h1:szDimtgmfOi9n25JpfIdGw12tZFYXqhGxjhVxsatHVE=
github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/ogier/pflag v0.0.1/go.mod h1:zkFki7tvTa0tafRvTBIZTvzYyAu6kQhPZFnshFFPE+g=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=
github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=
github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
github.com/pelletier/go-toml v1.0.1/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/peterh/liner v1.0.1-0.20171122030339-3681c2a91233/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
@@ -373,46 +298,17 @@ github.com/pion/turn/v4 v4.0.2 h1:ZqgQ3+MjP32ug30xAbD6Mn+/K4Sxi3SdNOTFf+7mpps=
github.com/pion/turn/v4 v4.0.2/go.mod h1:pMMKP/ieNAG/fN5cZiN4SDuyKsXtNTr0ccN7IToA1zs=
github.com/pion/webrtc/v4 v4.1.2 h1:mpuUo/EJ1zMNKGE79fAdYNFZBX790KE7kQQpLMjjR54=
github.com/pion/webrtc/v4 v4.1.2/go.mod h1:xsCXiNAmMEjIdFxAYU0MbB3RwRieJsegSB2JZsGN+8U=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4=
github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.0/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.57.0 h1:Ro/rKjwdq9mZn1K5QPctzh+MA4Lp0BuYk5ZZEVhoNcY=
github.com/prometheus/common v0.57.0/go.mod h1:7uRPFSUTbfZWsJ7MHY56sqt7hLQu3bxXHDnNhl8E9qI=
github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQPGO4=
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE=
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
@@ -421,28 +317,15 @@ github.com/quic-go/quic-go v0.59.0 h1:OLJkp1Mlm/aS7dpKgTc6cnpynnD2Xg7C1pwL6vy/SA
github.com/quic-go/quic-go v0.59.0/go.mod h1:upnsH4Ju1YkqpLXC305eW3yDZ4NfnNbmQRCMWS58IKU=
github.com/quic-go/webtransport-go v0.10.0 h1:LqXXPOXuETY5Xe8ITdGisBzTYmUOy5eSj+9n4hLTjHI=
github.com/quic-go/webtransport-go v0.10.0/go.mod h1:LeGIXr5BQKE3UsynwVBeQrU1TPrbh73MGoC6jd+V7ow=
github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ=
github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY=
github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/shiena/ansicolor v0.0.0-20151119151921-a422bbe96644/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg=
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02 h1:v9ezJDHA1XGxViAUSIoO/Id7Fl63u6d0YmsAm+/p2hs=
github.com/shiena/ansicolor v0.0.0-20230509054315-a9deabde6e02/go.mod h1:RF16/A3L0xSa0oSERcnhd8Pu3IXSDZSK2gmGIMsttFE=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/siddontang/go v0.0.0-20170517070808-cb568a3e5cc0/go.mod h1:3yhqj7WBBfRhbBlzyOC3gUxftwsU0u8gqevxwIHQpMw=
github.com/siddontang/goredis v0.0.0-20150324035039-760763f78400/go.mod h1:DDcKzU3qCuvj/tPnimWSsZZzvk9qvkvrIL5naVBPh5s=
github.com/siddontang/rdb v0.0.0-20150307021120-fc89ed2e418d/go.mod h1:AMEsy7v5z92TR1JKMkLLoaOQk++LVnOKL3ScbJ8GNGA=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/skarademir/naturalsort v0.0.0-20150715044055-69a5d87bef62/go.mod h1:oIdVclZaltY1Nf7OQUkg1/2jImBJ+ZfKZuDIRSwk3p0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs=
@@ -452,37 +335,32 @@ github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hg
github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM=
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/ssdb/gossdb v0.0.0-20180723034631-88f6b59b84ec/go.mod h1:QBvMkMya+gXctz3kmljlUCu/yB3GZ6oee+dUozsezQE=
|
||||
github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY=
|
||||
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/syndtr/goleveldb v0.0.0-20160425020131-cfa635847112/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
|
||||
github.com/tidwall/gjson v1.17.3 h1:bwWLZU7icoKRG+C+0PNwIKC6FCJO/Q3p2pZvuP0jN94=
|
||||
github.com/tidwall/gjson v1.17.3/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/ugorji/go v0.0.0-20171122102828-84cb69a8af83/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ=
|
||||
github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw=
|
||||
github.com/wendal/errors v0.0.0-20181209125328-7f31f4b264ec/go.mod h1:Q12BUT7DqIlHRmgv3RskH+UCM/4eqVMgI0EMmlSpAXc=
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k=
|
||||
github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc=
|
||||
github.com/wlynxg/anet v0.0.3/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
|
||||
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c=
|
||||
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
|
||||
github.com/xdg-go/scram v1.1.2 h1:FHX5I5B4i4hKRVRBCFRxq1iQRej7WO3hhBuJf+UUySY=
|
||||
@@ -494,11 +372,6 @@ github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfS
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yuin/gopher-lua v0.0.0-20171031051903-609c9cd26973/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU=
go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8=
go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
go.mongodb.org/mongo-driver v1.17.3 h1:TQyXhnsWfWtgAhMtOgtYHMTkZIfBTpMTsMnd9ZBeHxQ=
go.mongodb.org/mongo-driver v1.17.3/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
@@ -513,6 +386,8 @@ go.uber.org/dig v1.19.0 h1:BACLhebsYdpQ7IROQ1AGPjrXcP5dF80U3gKoFzbaq/4=
go.uber.org/dig v1.19.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE=
go.uber.org/fx v1.24.0 h1:wE8mruvpg2kiiL1Vqd0CC+tr0/24XIB10Iwp2lLWzkg=
go.uber.org/fx v1.24.0/go.mod h1:AmDeGyS+ZARGKM4tlH4FY2Jr63VjbEDJHtqXTGP5hbo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
@@ -521,25 +396,19 @@ go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8=
golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU=
@@ -552,11 +421,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -568,43 +434,22 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -618,15 +463,10 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2 h1:O1cMQHRfwNpDfDJerqRoE2oD+AFlyid87D40L/OkkJo=
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
@@ -634,6 +474,8 @@ golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
@@ -642,12 +484,6 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=
golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
@@ -668,38 +504,42 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=
gonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=
gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=
k8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=
k8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=
k8s.io/apimachinery v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=
k8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE=
k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck=
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/blake3 v1.4.1 h1:I3Smz7gso8w4/TunLKec6K2fn+kyKtDxr/xcQEN84Wg=
lukechampine.com/blake3 v1.4.1/go.mod h1:QFosUxmjB8mnrWFSNwKmvxHpfY72bmD2tQ0kBMM3kwo=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
865
logs.txt
Normal file
@@ -0,0 +1,865 @@
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Config file found : /etc/oc/discovery.json"}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Connecting tomongodb://mongo:27017/"}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Connecting mongo client to db DC_myDC"}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Database is READY"}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Config file found : /etc/oc/discovery.json"}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"retrieving private key..."}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"retrieving psk file..."}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"open a host..."}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"Host open on 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN"}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"generate opencloud node..."}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"connect to indexers..."}
|
||||
{"level":"info","time":"2026-04-08T06:17:16Z","message":"claims my node..."}
|
||||
{"level":"info","proto":"/opencloud/heartbeat/1.0","peers":1,"time":"2026-04-08T06:17:16Z","message":"heartbeat started"}
|
||||
[location] granularity=2 raw=(43.6046,1.4451) fuzzed=(43.5226,-0.7667)
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"run garbage collector..."}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"connect to partners..."}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"SetStreamHandler /opencloud/resource/update/1.0"}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"SetStreamHandler /opencloud/resource/delete/1.0"}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"SetStreamHandler /opencloud/resource/create/1.0"}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"subscribe to decentralized search flow..."}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"connect to NATS"}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Node is actually running."}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Listening to propalgation_event"}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Listening to peer_behavior_event"}
|
||||
Published on create_resource
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:17Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:21Z","message":"[dht] node DHT client ready"}
|
||||
{"level":"info","need":4,"time":"2026-04-08T06:17:21Z","message":"[dht] proactive indexer discovery from DHT"}
|
||||
{"level":"info","need":4,"time":"2026-04-08T06:17:21Z","message":"[dht] replenishing indexer pool from DHT"}
|
||||
{"level":"info","found":0,"time":"2026-04-08T06:17:21Z","message":"[dht] indexer discovery complete"}
|
||||
{"level":"warn","time":"2026-04-08T06:17:21Z","message":"[dht] no indexers found in DHT for replenishment"}
|
||||
{"level":"info","time":"2026-04-08T06:17:22Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:22Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:22Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:22Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:27Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:27Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:28Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:28Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:31Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:31Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:33Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:33Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:33Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:33Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:36Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:36Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:36Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:36Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:36Z","message":"New Stream engaged as Heartbeat /opencloud/heartbeat/1.0 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu"}
|
||||
{"level":"info","added":1,"from":"12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu","time":"2026-04-08T06:17:36Z","message":"added suggested indexers from heartbeat response"}
|
||||
{"level":"info","time":"2026-04-08T06:17:36Z","message":"nudge received, heartbeating new indexers immediately"}
|
||||
{"level":"info","time":"2026-04-08T06:17:36Z","message":"New Stream engaged as Heartbeat /opencloud/heartbeat/1.0 12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u"}
|
||||
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:17:36Z","message":"witness report"}
|
||||
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:17:36Z","message":"witness report"}
|
||||
{"level":"info","time":"2026-04-08T06:17:41Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:41Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:41Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:41Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:46Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:46Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:46Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:46Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
|
||||
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
|
||||
{"level":"info","time":"2026-04-08T06:17:51Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:51Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:51Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:51Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:56Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:56Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:17:56Z","message":"witness report"}
|
||||
{"level":"info","time":"2026-04-08T06:17:57Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:17:57Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:02Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:02Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:02Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:02Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:07Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:07Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:07Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:07Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:12Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:12Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:12Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:12Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
|
||||
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:18:16Z","message":"witness report"}
|
||||
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
|
||||
{"level":"info","time":"2026-04-08T06:18:17Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:17Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:17Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:17Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:22Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:22Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:22Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:22Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:27Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:27Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:27Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:27Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:32Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:32Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:32Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:32Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:18:36Z","message":"witness report"}
|
||||
{"level":"info","time":"2026-04-08T06:18:37Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:37Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:37Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:37Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:42Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:42Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:42Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:42Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
|
||||
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
|
||||
{"level":"info","time":"2026-04-08T06:18:47Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:47Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:47Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:47Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:52Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:52Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:53Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:53Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:18:56Z","message":"witness report"}
|
||||
{"level":"info","time":"2026-04-08T06:18:58Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:58Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:58Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:18:58Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:03Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:03Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:03Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:03Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:08Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:08Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:08Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:08Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:13Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:13Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:13Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:13Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
|
||||
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:16Z","message":"witness report"}
|
||||
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
|
||||
{"level":"info","time":"2026-04-08T06:19:18Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:18Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:18Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:18Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629158 [] [23 185 233 217 252 222 167 147 185 9 46 210 169 26 129 42 84 220 137 75 23 42 46 164 85 169 193 238 54 52 179 3 4 189 34 95 188 69 145 100 214 183 69 106 248 20 58 176 61 17 208 250 147 187 154 221 86 213 99 74 6 127 10 3]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629158
{"level":"info","time":"2026-04-08T06:19:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:23Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629163 [] [148 228 35 212 146 117 198 240 248 47 53 28 217 242 84 16 53 77 141 38 6 249 197 179 135 35 145 140 151 72 185 169 54 204 188 6 20 164 204 214 121 157 67 117 30 95 109 55 108 249 174 141 68 222 186 135 191 90 60 187 194 30 172 5]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629163
{"level":"info","time":"2026-04-08T06:19:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:28Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629168 [] [48 136 252 209 83 105 251 34 193 50 235 55 55 191 89 233 40 236 174 129 200 65 63 74 57 229 66 155 94 151 91 120 115 163 230 136 81 161 88 35 170 229 126 33 45 134 79 111 7 114 18 209 200 103 43 75 18 170 194 108 197 253 112 13]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629168
{"level":"info","time":"2026-04-08T06:19:31Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:31Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629171 [] [216 81 114 23 19 203 244 153 11 214 139 99 54 104 11 30 130 152 134 82 91 69 227 136 79 160 150 92 56 191 14 150 252 100 235 150 93 199 139 179 59 97 131 99 88 55 131 145 246 243 235 31 55 41 42 32 215 75 15 63 31 131 73 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629171
{"level":"info","time":"2026-04-08T06:19:33Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:33Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:33Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:33Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629173 [] [92 242 214 250 125 244 127 81 229 41 112 244 67 188 187 216 10 233 184 185 231 147 125 84 105 192 97 1 65 231 133 104 34 159 166 149 45 186 190 51 117 68 231 163 40 0 58 162 94 88 207 37 206 45 66 178 118 149 54 78 57 1 3 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629173
{"level":"info","time":"2026-04-08T06:19:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:36Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629176 [] [54 239 156 204 25 72 110 223 95 214 112 163 168 169 130 166 57 102 104 232 85 72 119 22 58 182 122 44 2 157 192 190 89 249 35 93 183 96 120 183 23 88 104 83 77 93 3 233 50 231 190 217 162 109 134 212 198 85 206 252 139 156 218 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629176
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:19:38Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:38Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:41Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629181 [] [127 158 58 208 104 78 156 212 40 94 58 139 31 6 199 56 174 149 133 246 174 53 58 21 177 70 222 188 111 252 100 238 136 147 1 46 4 158 101 6 80 113 125 136 135 228 149 240 212 154 152 92 37 146 163 141 150 95 133 35 62 170 180 9]}
sendPlanner 0
{"level":"info","time":"2026-04-08T06:19:41Z","message":"Catching propalgation event... oc-scheduler - "}
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629181
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:19:46Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:46Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:47Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:19:47Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
handleEvent
|
||||
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629187 [] [151 223 225 231 72 46 42 81 219 141 98 116 110 73 2 184 17 222 237 90 217 214 53 151 144 3 88 16 241 119 18 173 3 150 226 167 148 111 83 87 225 120 78 69 38 135 253 164 97 128 66 87 219 207 190 235 196 92 50 89 116 246 17 5]}
|
||||
sendPlanner 0
|
||||
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
|
||||
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629187
|
||||
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:19:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:52Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629192 [] [111 224 143 159 179 190 133 150 240 14 180 172 240 228 53 86 220 117 5 151 202 186 17 35 105 82 21 231 133 155 7 170 96 134 221 61 23 6 146 1 75 253 32 58 126 98 140 4 246 87 24 230 62 83 114 168 163 74 152 53 217 201 229 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629192
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:19:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:19:57Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:57Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:57Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:19:57Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629197 [] [59 20 147 21 143 23 216 117 191 182 95 226 35 78 36 207 4 147 88 202 120 62 22 123 80 205 240 83 239 70 176 59 211 78 229 145 234 44 172 196 61 193 28 121 145 215 214 169 165 82 57 233 164 87 239 32 6 86 196 18 218 74 227 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629197
{"level":"info","time":"2026-04-08T06:20:02Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:02Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:02Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:02Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629202 [] [54 107 90 122 30 116 226 174 104 100 37 235 211 254 239 142 225 51 128 143 22 29 195 114 125 40 197 196 131 65 67 63 62 101 56 144 212 113 33 95 153 254 238 194 71 3 99 121 190 135 33 167 83 49 137 168 113 133 181 155 63 214 176 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629202
{"level":"info","time":"2026-04-08T06:20:07Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:07Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:07Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629207 [] [57 134 104 85 240 207 200 242 9 59 251 115 73 171 5 39 138 118 184 194 201 211 132 32 254 74 163 221 220 79 102 201 107 3 87 49 213 235 164 31 174 104 209 246 196 64 85 15 248 99 69 165 153 175 40 77 115 61 129 112 36 53 47 13]}
sendPlanner 0
{"level":"info","time":"2026-04-08T06:20:07Z","message":"Catching propalgation event... oc-scheduler - "}
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629207
{"level":"info","time":"2026-04-08T06:20:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:12Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629212 [] [215 117 62 147 102 252 227 9 93 160 68 16 169 186 89 164 56 59 42 225 102 42 57 242 128 46 18 182 109 94 140 95 167 254 240 34 108 229 225 171 28 17 34 113 122 164 121 212 83 227 121 8 11 165 150 5 227 179 156 60 185 75 67 7]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629212
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:16Z","message":"witness report"}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:20:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:17Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629217 [] [122 210 113 143 218 90 178 12 159 83 141 245 210 208 198 0 73 80 127 229 163 221 103 110 98 102 228 198 152 201 57 180 167 246 85 75 85 203 223 139 131 99 54 151 42 36 243 202 241 32 178 202 63 198 104 92 210 26 248 230 115 28 120 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629217
{"level":"info","time":"2026-04-08T06:20:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:22Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
{"level":"info","time":"2026-04-08T06:20:22Z","message":"Catching propalgation event... oc-scheduler - "}
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629222 [] [98 74 235 201 133 101 129 234 143 83 255 18 141 176 169 45 134 208 161 116 31 110 66 138 163 128 202 143 80 171 55 4 159 83 100 155 218 40 66 102 245 17 165 102 131 206 245 95 121 136 129 109 98 175 54 101 67 18 229 19 255 170 236 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629222
{"level":"info","time":"2026-04-08T06:20:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:27Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629227 [] [156 122 36 54 136 116 3 84 110 253 41 115 42 125 114 202 247 54 2 16 150 129 247 202 165 131 236 228 103 244 233 202 31 61 247 47 108 182 244 136 187 187 51 2 2 212 167 180 28 138 87 1 255 253 5 52 40 206 211 89 171 106 46 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629227
{"level":"info","time":"2026-04-08T06:20:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:32Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629232 [] [27 218 207 190 133 114 43 118 66 246 46 131 169 53 78 44 118 11 115 146 75 78 192 70 20 132 177 126 116 41 4 82 91 182 92 95 157 252 39 218 96 59 222 182 159 171 151 93 63 254 212 244 243 149 248 234 177 108 160 163 77 179 99 4]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629232
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:20:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:37Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629237 [] [66 73 1 165 227 98 102 241 100 114 13 76 157 152 186 255 91 225 92 239 119 226 131 110 63 17 118 163 218 216 82 77 159 127 112 189 232 58 185 236 186 247 44 208 20 57 214 123 0 44 238 132 124 168 240 210 43 74 29 58 252 224 140 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629237
{"level":"info","time":"2026-04-08T06:20:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:43Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:43Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629243 [] [187 179 204 44 46 42 148 223 7 92 219 164 86 92 179 130 108 225 153 122 216 227 255 58 75 154 36 41 149 225 159 57 71 142 104 244 194 61 123 52 129 98 103 234 232 42 181 251 19 101 190 168 128 101 227 32 135 240 54 16 194 185 153 10]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629243
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:20:48Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:48Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:48Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:48Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629248 [] [210 69 3 179 113 27 162 199 166 128 158 136 142 219 138 230 173 237 41 106 245 196 147 23 14 246 101 102 117 21 146 63 205 112 83 92 255 2 178 177 120 187 223 76 203 219 106 93 152 155 143 42 128 210 49 156 116 68 142 33 156 134 72 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629248
{"level":"info","time":"2026-04-08T06:20:53Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:53Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:53Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:53Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629253 [] [98 113 231 230 60 162 99 109 134 79 56 165 55 34 199 155 193 206 14 147 77 204 30 202 204 132 175 84 98 196 126 38 127 234 144 60 176 102 123 90 23 253 63 94 102 85 33 245 228 39 49 203 81 105 59 58 78 215 9 146 196 129 214 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629253
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:20:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:20:58Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:58Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:58Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:20:58Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629258 [] [162 228 163 147 196 70 190 233 194 135 209 58 118 66 252 60 11 21 153 184 75 46 15 166 74 81 139 192 70 155 217 61 180 34 31 231 187 121 142 194 72 235 89 198 123 92 0 94 4 162 140 123 107 128 40 211 154 155 33 20 231 108 189 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629258
{"level":"info","time":"2026-04-08T06:21:03Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:03Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:03Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:03Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629263 [] [20 112 147 99 13 17 196 30 106 208 129 192 89 88 89 29 13 237 42 200 147 35 51 171 245 211 141 82 162 241 75 242 52 123 186 189 210 57 228 251 188 71 136 13 244 154 120 85 17 120 2 195 170 105 95 66 197 156 141 201 193 225 101 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629263
{"level":"info","time":"2026-04-08T06:21:08Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:08Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:08Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:08Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629268 [] [110 141 235 237 232 239 196 232 36 2 113 65 208 171 45 101 205 210 104 91 173 207 52 76 61 227 57 225 8 143 184 208 103 226 163 105 10 229 11 205 26 40 56 182 212 49 242 151 28 155 96 235 85 128 177 94 10 162 192 212 89 146 12 13]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629268
{"level":"info","time":"2026-04-08T06:21:13Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:13Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:13Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:13Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629273 [] [156 56 90 120 217 145 134 236 179 121 242 238 104 51 219 222 64 187 30 164 122 148 93 163 211 8 96 24 81 202 77 128 1 231 61 106 5 222 78 83 107 132 231 180 37 46 27 249 38 163 100 1 20 194 164 79 250 184 67 128 245 183 33 11]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629273
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:16Z","message":"witness report"}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:21:18Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:18Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:18Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:18Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629278 [] [148 48 28 250 242 138 131 204 57 180 146 119 232 43 28 103 82 127 68 120 144 129 9 191 47 7 8 202 130 176 222 24 120 15 164 128 112 142 10 29 31 199 183 181 88 221 74 63 198 40 64 75 46 116 24 82 1 227 205 50 223 185 20 7]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629278
{"level":"info","time":"2026-04-08T06:21:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:23Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:23Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629283 [] [127 128 124 54 133 149 56 98 160 205 142 167 223 216 219 41 136 215 118 172 104 237 75 1 225 217 135 30 40 43 152 175 85 33 249 226 136 173 48 227 188 201 209 97 164 71 109 153 173 177 238 1 48 5 74 240 155 80 44 7 191 217 182 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629283
{"level":"info","time":"2026-04-08T06:21:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:28Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:29Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629289 [] [30 247 110 61 160 25 49 192 200 184 72 44 244 40 97 148 226 113 239 127 100 253 83 95 168 108 64 62 138 78 126 36 223 108 76 232 5 254 202 116 234 179 14 196 31 193 211 116 74 201 236 164 69 24 52 42 215 61 128 101 0 39 32 1]}
sendPlanner 0
{"level":"info","time":"2026-04-08T06:21:29Z","message":"Catching propalgation event... oc-scheduler - "}
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629289
{"level":"info","time":"2026-04-08T06:21:34Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:34Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:34Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:34Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629294 [] [49 111 35 93 42 104 196 113 147 214 133 98 55 125 220 97 32 136 31 24 228 36 49 136 178 200 95 168 175 93 252 239 189 139 232 12 234 90 100 17 203 119 41 135 118 51 89 131 95 99 175 131 73 159 153 14 125 119 190 35 246 197 96 4]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629294
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:21:39Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:39Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:39Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:39Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629299 [] [22 128 127 139 144 129 120 137 46 77 249 144 78 154 135 105 172 70 0 188 41 99 232 139 111 193 17 70 128 119 139 221 92 230 34 230 82 234 71 108 11 11 40 34 211 126 80 63 188 67 226 171 7 120 181 168 42 118 92 160 128 209 199 10]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629299
{"level":"info","time":"2026-04-08T06:21:44Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:44Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:44Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:44Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629304 [] [240 170 201 48 157 246 93 160 52 84 119 29 39 64 210 83 155 211 24 189 11 57 75 69 91 101 83 150 55 146 51 208 254 60 128 151 238 99 45 153 9 195 139 123 122 137 206 45 100 249 55 98 85 215 194 33 95 71 164 99 160 223 90 0]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629304
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:21:49Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:49Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:49Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:49Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629309 [] [179 172 109 115 79 55 94 106 15 237 19 131 151 43 10 7 72 77 2 34 176 107 173 191 240 81 63 217 10 154 172 119 161 82 9 153 70 208 120 46 207 44 43 71 177 103 23 105 220 70 61 67 95 149 155 214 48 94 163 119 137 80 224 13]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629309
{"level":"info","time":"2026-04-08T06:21:54Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:54Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:54Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:54Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629314 [] [157 215 144 156 25 165 9 240 220 81 71 142 16 43 178 102 120 205 4 242 206 104 24 211 249 252 141 85 99 49 46 114 57 130 244 253 153 218 20 66 122 119 63 128 55 162 231 112 221 36 190 225 226 141 123 251 179 33 153 10 126 220 183 0]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629314
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:21:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:21:59Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:59Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:59Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:21:59Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629319 [] [60 10 119 166 57 198 44 211 37 69 210 60 151 227 116 48 167 100 105 231 170 221 65 184 148 189 136 19 169 39 34 7 37 208 1 83 93 0 228 219 223 53 217 129 90 48 204 117 198 150 228 38 176 211 173 125 120 251 96 135 166 205 21 1]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629319
{"level":"info","time":"2026-04-08T06:22:04Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:04Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:04Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:04Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629324 [] [70 8 80 144 29 14 138 41 69 121 183 136 249 69 130 66 211 97 58 92 106 206 212 70 237 239 13 77 76 117 254 238 246 54 200 102 187 178 44 236 56 30 151 255 102 199 67 189 142 93 22 105 48 96 134 147 132 166 5 34 99 206 89 1]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629324
{"level":"info","time":"2026-04-08T06:22:09Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:09Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:09Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:09Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629329 [] [34 176 30 255 58 157 174 41 8 33 88 101 25 86 132 135 160 119 211 45 145 130 83 234 66 160 58 3 81 135 9 81 179 111 163 85 47 200 180 172 172 159 200 27 230 110 158 250 1 21 74 78 91 47 12 92 100 101 114 216 179 56 6 1]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629329
{"level":"info","time":"2026-04-08T06:22:14Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:14Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:15Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:15Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629335 [] [170 58 162 2 109 219 82 212 97 254 26 88 22 167 86 86 139 214 119 179 237 60 203 12 193 126 20 227 116 140 67 87 241 250 24 93 122 93 97 145 204 45 154 175 162 96 127 220 180 2 34 234 154 204 242 114 174 88 222 135 53 214 93 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629335
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:16Z","message":"witness report"}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:22:20Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:20Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:20Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:20Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629340 [] [238 195 43 183 107 63 168 220 253 184 160 179 158 178 165 18 63 242 162 211 74 185 22 66 56 139 189 236 91 229 162 94 244 74 125 229 156 74 43 108 66 229 181 160 106 103 210 105 242 151 125 116 18 166 133 112 194 78 64 41 159 213 119 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629340
{"level":"info","time":"2026-04-08T06:22:25Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:25Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:25Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:25Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629345 [] [3 253 149 166 87 72 223 116 17 174 119 244 178 113 199 143 95 24 132 160 245 22 104 98 161 183 22 219 187 58 98 27 57 165 159 130 132 178 53 13 165 200 94 31 47 53 199 131 61 24 128 177 212 196 170 189 253 49 214 144 120 221 99 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629345
{"level":"info","time":"2026-04-08T06:22:30Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:30Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:30Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:30Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629350 [] [181 210 168 89 33 8 181 117 214 50 72 135 24 44 202 145 53 30 123 114 245 156 170 254 171 22 52 205 63 37 232 187 153 176 71 181 226 14 56 123 223 243 12 180 207 196 14 139 71 87 34 27 184 156 1 131 231 154 114 40 207 99 24 9]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629350
{"level":"info","time":"2026-04-08T06:22:35Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:35Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:35Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:35Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629355 [] [206 59 19 160 32 126 18 156 87 174 206 32 59 177 100 105 90 226 227 101 156 201 16 46 46 26 125 91 87 146 186 183 156 169 151 96 96 26 24 110 149 82 157 99 228 235 168 74 239 244 91 241 143 124 229 6 30 197 49 233 48 208 80 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629355
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:22:40Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:40Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:40Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:40Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629360 [] [90 246 228 229 207 100 199 90 36 54 134 95 53 15 1 170 67 139 67 169 160 69 127 224 21 32 38 142 79 108 247 12 204 251 157 57 180 116 116 76 240 39 176 219 148 10 107 9 227 128 6 71 216 38 134 234 205 253 95 174 177 216 161 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629360
{"level":"info","time":"2026-04-08T06:22:45Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:45Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:45Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:45Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629365 [] [63 199 217 15 130 99 177 48 78 10 36 251 150 42 115 110 100 247 20 74 254 171 89 52 92 230 211 148 104 143 107 230 209 68 95 224 15 57 122 163 53 46 159 191 92 175 118 241 157 14 62 2 101 66 66 211 67 124 208 117 101 1 225 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629365
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:22:50Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:50Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:50Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:50Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629370 [] [91 181 191 156 49 87 26 57 12 57 27 55 225 180 28 39 16 57 126 224 26 10 145 93 71 252 245 81 173 203 116 234 36 138 219 93 165 15 130 246 116 128 146 151 255 128 140 210 112 225 129 5 45 10 198 125 98 135 1 89 144 115 60 11]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629370
{"level":"info","time":"2026-04-08T06:22:55Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:55Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:55Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:22:55Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629375 [] [168 228 101 110 246 181 204 230 69 13 111 254 247 246 184 225 72 223 92 222 179 241 100 165 195 74 14 198 45 136 87 171 93 156 118 186 49 2 58 187 123 117 79 124 61 11 119 124 53 2 95 121 29 251 73 217 212 191 200 176 139 228 215 13]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629375
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:22:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:23:00Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:00Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:01Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:01Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629381 [] [144 54 219 83 60 101 156 100 36 53 246 108 176 11 139 211 109 131 136 230 114 54 172 174 19 55 43 50 22 109 164 36 51 57 44 117 203 235 34 222 204 199 30 130 72 91 125 97 229 230 155 34 1 129 192 230 227 13 210 149 41 42 63 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629381
{"level":"info","time":"2026-04-08T06:23:01Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
{"level":"info","time":"2026-04-08T06:23:01Z","message":"Catching propalgation event... oc-scheduler - "}
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629381 [] [144 54 219 83 60 101 156 100 36 53 246 108 176 11 139 211 109 131 136 230 114 54 172 174 19 55 43 50 22 109 164 36 51 57 44 117 203 235 34 222 204 199 30 130 72 91 125 97 229 230 155 34 1 129 192 230 227 13 210 149 41 42 63 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629381
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629386 [] [30 202 184 203 25 1 125 206 41 62 89 178 214 193 137 233 121 38 215 131 240 88 92 129 180 41 181 33 6 54 118 64 216 128 3 248 193 219 65 6 29 91 17 247 40 25 133 7 116 66 66 64 183 35 150 196 120 248 154 133 92 80 138 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629386
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:06Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629386 [] [30 202 184 203 25 1 125 206 41 62 89 178 214 193 137 233 121 38 215 131 240 88 92 129 180 41 181 33 6 54 118 64 216 128 3 248 193 219 65 6 29 91 17 247 40 25 133 7 116 66 66 64 183 35 150 196 120 248 154 133 92 80 138 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629386
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629391 [] [110 205 129 253 77 31 152 6 78 24 42 124 97 64 89 83 228 237 184 188 238 96 174 104 50 102 124 130 86 94 197 8 234 175 51 108 62 143 173 174 237 85 151 24 68 87 56 55 38 225 233 70 50 80 66 236 241 165 134 78 242 88 168 10]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629391
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:12Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:12Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629392 [] [75 116 113 253 77 139 233 209 46 112 155 197 247 65 52 60 137 142 105 1 205 163 117 18 158 109 93 196 162 238 246 20 224 91 203 238 197 189 191 221 154 16 149 245 45 66 27 211 84 170 9 125 137 77 171 165 232 188 216 242 164 215 76 11]}
|
||||
sendPlanner 0
|
||||
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
|
||||
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629392
|
||||
{"level":"info","time":"2026-04-08T06:23:16Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:23:16Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:23:16Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
{"level":"info","time":"2026-04-08T06:23:16Z","message":"Catching propalgation event... oc-scheduler - "}
|
||||
handleEvent
|
||||
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629396 [] [240 147 198 201 116 215 160 95 148 236 135 110 30 97 4 108 212 135 243 192 27 44 61 14 30 116 20 247 191 1 103 228 225 64 12 0 102 20 146 104 141 143 144 245 158 186 83 98 232 170 7 24 148 190 174 234 237 77 212 157 73 109 118 9]}
|
||||
sendPlanner 0
|
||||
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
|
||||
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629396
|
||||
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:16Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:23:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:17Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:17Z","message":"Catching propalgation event... oc-scheduler - "}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629397 [] [144 164 56 237 29 110 226 88 72 232 64 152 15 204 134 122 155 214 86 191 104 104 233 231 117 60 0 46 236 113 98 65 83 58 9 168 130 155 188 250 239 51 192 170 242 103 173 236 91 177 224 13 161 196 219 48 139 206 90 53 98 20 239 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629397
{"level":"info","time":"2026-04-08T06:23:21Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:21Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:22Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:22Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629402 [] [160 52 205 176 18 24 15 168 91 245 68 74 111 206 79 223 168 65 78 51 228 33 168 160 119 90 93 100 197 188 182 5 78 203 114 164 154 38 38 109 105 5 1 164 176 177 80 180 246 148 224 27 197 112 27 163 148 255 141 147 34 188 76 12]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629402
{"level":"info","time":"2026-04-08T06:23:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:27Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:27Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629407 [] [88 59 221 55 52 217 233 119 150 192 131 111 42 48 14 16 50 136 74 47 169 197 90 201 55 244 224 159 96 11 247 141 130 162 158 159 61 223 1 92 102 220 144 6 193 206 130 184 122 35 192 144 233 156 4 162 32 148 54 38 138 173 20 3]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629407
{"level":"info","time":"2026-04-08T06:23:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:32Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629412 [] [182 88 119 236 225 31 252 201 108 139 33 209 96 200 95 175 113 90 100 161 245 96 236 77 177 94 27 202 73 29 24 144 53 105 98 119 246 73 194 77 161 49 76 129 106 24 21 143 253 80 129 124 120 252 135 88 134 135 207 157 192 2 219 4]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629412
{"level":"info","time":"2026-04-08T06:23:36Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:36Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629416 [] [10 244 75 241 90 17 201 160 103 126 22 191 185 54 29 248 245 177 134 52 150 112 55 146 241 110 87 175 229 109 156 206 245 219 178 64 104 78 24 182 65 254 218 15 86 33 182 25 92 108 83 228 153 76 244 150 108 234 21 39 87 196 146 12]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629416
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:23:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:41Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:41Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629421 [] [245 147 119 148 73 234 140 156 142 197 2 195 72 36 158 115 145 114 155 37 71 197 182 145 61 187 183 194 224 168 39 133 73 122 207 153 51 18 253 144 63 88 138 225 211 61 26 10 239 110 143 97 150 52 228 80 245 159 248 225 56 187 75 4]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629421
{"level":"info","time":"2026-04-08T06:23:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:46Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:46Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629426 [] [141 72 153 198 16 160 45 156 136 192 30 121 181 17 191 121 164 1 21 178 18 144 234 140 250 86 200 10 239 124 55 214 17 37 197 65 192 47 211 6 77 242 33 240 223 228 13 22 34 198 107 128 182 220 178 184 54 133 169 166 101 156 102 5]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629426
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:23:51Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:51Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:51Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629431 [] [12 54 17 250 235 119 198 198 176 220 73 25 17 129 109 77 21 5 79 143 223 236 101 253 67 236 125 152 154 33 35 141 10 94 10 208 18 2 133 244 145 2 255 82 255 205 44 161 50 254 199 50 91 112 80 182 206 2 185 114 253 92 92 14]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629431
{"level":"info","time":"2026-04-08T06:23:51Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:56Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:56Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:56Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:23:56Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629436 [] [23 83 186 19 183 6 46 206 59 164 212 169 55 102 13 73 224 1 161 53 196 143 252 186 57 92 58 65 207 137 186 81 172 118 242 143 197 229 171 7 29 127 80 235 182 193 92 104 137 158 182 102 18 242 121 191 29 230 30 197 136 144 135 1]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629436
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:56Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:23:56Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:24:01Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:01Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:01Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:01Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629441 [] [254 13 94 37 111 178 40 255 29 162 65 35 169 202 46 185 196 213 241 189 215 208 215 53 142 226 11 120 36 151 74 60 48 255 214 188 71 71 113 85 96 110 211 213 22 24 115 27 172 118 188 154 89 147 143 149 202 193 240 123 139 19 87 6]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629441
{"level":"info","time":"2026-04-08T06:24:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:06Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:06Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629446 [] [246 176 91 73 160 106 186 232 90 254 21 33 188 36 82 137 87 157 180 212 249 215 0 194 143 19 45 211 65 107 64 50 225 228 242 113 78 152 136 187 19 134 178 53 244 118 233 41 188 102 161 95 32 120 87 154 4 111 222 114 195 6 255 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629446
{"level":"info","time":"2026-04-08T06:24:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:11Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:11Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629451 [] [86 184 139 175 162 65 132 180 192 217 216 94 173 156 155 61 160 135 254 150 36 163 185 29 90 201 55 156 154 112 215 85 170 73 89 44 48 50 216 26 48 176 49 251 55 59 41 110 255 205 249 121 252 13 178 191 238 221 153 87 167 160 109 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629451
{"level":"info","time":"2026-04-08T06:24:16Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:16Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:16Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:16Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629456 [] [155 167 250 33 26 113 255 90 195 252 218 51 87 156 236 27 33 235 173 205 214 91 71 182 173 208 4 34 56 187 252 51 242 25 188 172 144 173 139 254 206 169 130 128 76 124 221 225 175 107 51 5 195 217 190 89 183 197 214 233 225 144 22 9]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629456
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:16Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:16Z","message":"witness report"}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:24:21Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:21Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:21Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:21Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629461 [] [223 59 241 174 47 85 114 207 253 22 199 23 113 236 175 59 178 219 243 5 86 61 120 204 45 48 23 18 133 213 233 207 215 112 60 211 172 117 141 207 212 195 38 176 99 210 36 167 4 173 3 172 118 230 68 15 115 1 34 117 29 43 17 10]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629461
{"level":"info","time":"2026-04-08T06:24:26Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:26Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:26Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:26Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629466 [] [78 233 88 195 228 51 61 74 210 130 161 187 123 96 35 249 102 247 231 145 230 218 95 156 156 57 196 239 51 100 156 207 145 191 177 198 148 187 45 255 188 135 168 246 45 232 145 167 16 227 212 162 240 83 53 227 19 50 4 155 89 251 173 15]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629466
{"level":"info","time":"2026-04-08T06:24:31Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:31Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:32Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:32Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629472 [] [248 73 18 53 38 161 4 235 67 154 166 207 190 24 82 159 151 63 131 28 138 151 149 208 177 1 148 50 52 20 130 170 147 166 102 151 65 33 160 85 94 206 117 136 17 45 102 242 151 27 234 24 220 119 242 246 63 118 124 180 251 201 150 2]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629472
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:36Z","message":"witness report"}
{"level":"debug","witness":"12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw","bornAt_ok":false,"fill_ok":true,"time":"2026-04-08T06:24:36Z","message":"witness report"}
{"level":"info","time":"2026-04-08T06:24:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:37Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:37Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629477 [] [18 59 128 116 59 142 226 74 128 34 172 184 42 157 167 147 209 86 3 76 58 71 29 198 127 217 243 106 0 8 233 13 28 54 16 255 138 41 114 125 164 222 219 216 190 57 22 197 217 128 65 197 209 136 44 83 82 207 68 42 252 246 13 7]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629477
{"level":"info","time":"2026-04-08T06:24:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:42Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:42Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629482 [] [215 29 21 255 33 54 189 86 117 184 2 113 112 242 70 247 207 146 67 145 195 86 28 111 185 44 127 169 88 232 89 178 126 185 103 154 58 53 235 247 245 64 28 23 33 6 145 58 32 230 73 234 168 227 137 107 191 80 91 197 194 61 166 8]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
{"level":"info","time":"2026-04-08T06:24:42Z","message":"Catching propalgation event... oc-scheduler - "}
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629482
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:24:47Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:47Z","message":"Catching propalgation event... oc-scheduler - "}
ACTUALLY RELATED INDEXERS map[12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u:{12D3KooWC3GNStak8KCYtJq11Dxiq45EJV53z1ZvKetMcZBeBX6u: [/ip4/172.19.0.14/tcp/4002 /ip4/172.40.0.2/tcp/4002]} 12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu:{12D3KooWGn3j4XqTSrjJDGGpTQERdDV5TPZdhQp87rAUnvQssvQu: [/ip4/172.40.0.1/tcp/4001]}] 2
{"level":"info","time":"2026-04-08T06:24:47Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:47Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629487 [] [5 82 17 18 126 200 221 134 181 2 49 50 91 164 41 106 249 127 229 134 172 39 47 89 85 69 10 130 162 118 200 8 236 130 234 199 170 129 48 35 152 51 170 110 224 157 113 148 82 30 50 142 147 145 248 90 246 214 192 216 202 228 209 7]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629487
{"level":"info","time":"2026-04-08T06:24:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:52Z","message":"Catching propalgation event... oc-scheduler - "}
{"level":"info","time":"2026-04-08T06:24:52Z","message":"Catching propalgation event... oc-scheduler - "}
handleEvent
sendPlanner &{/opencloud/resource/planner/1.0 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw [] -1 1775629492 [] [131 245 52 32 139 115 185 77 202 252 164 122 78 64 57 34 31 59 111 170 69 186 52 182 219 102 57 5 93 169 20 201 204 107 19 124 212 239 19 134 62 200 86 206 54 214 124 41 155 206 243 91 91 215 19 73 230 132 139 220 91 43 196 12]}
sendPlanner 0
PublishCommon 12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw
SEND EVENT {12D3KooWBh9kZrekBAE5G33q4jCLNRAzygem3gP1mMdK8mhoCTaw: [/ip4/172.40.0.3/tcp/4003]} /opencloud/resource/planner/1.0 12D3KooWSzQtBux5GkpdqK8MA9Rmo5W1vTVZhWCbut2k99Ge45GN -1 1775629492
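Note on the excerpt above: the daemon repeats a fixed publish cycle every 4-5 seconds (handleEvent -> sendPlanner -> PublishCommon -> SEND EVENT), and the 64-byte array in each sendPlanner line is consistent with an Ed25519 signature over the event, which matches the Ed25519 PEM keys this branch ships. A minimal sketch of such a ticker-driven, signed publish loop; all names here are illustrative, not the oc-discovery API:

package main

import (
	"crypto/ed25519"
	"fmt"
	"time"
)

// plannerEvent mirrors the fields visible in the sendPlanner log line:
// protocol, peer ID, a Unix timestamp, and a 64-byte signature.
type plannerEvent struct {
	Protocol  string
	PeerID    string
	Timestamp int64
	Signature []byte
}

func main() {
	// Throwaway key for the sketch; the real daemon loads pem/private.pem.
	_, priv, _ := ed25519.GenerateKey(nil)

	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()

	for t := range ticker.C {
		ev := plannerEvent{
			Protocol:  "/opencloud/resource/planner/1.0",
			PeerID:    "12D3KooW-example-peer", // placeholder, not a real peer ID
			Timestamp: t.Unix(),
		}
		// Sign a canonical serialization of the event fields.
		payload := fmt.Sprintf("%s|%s|%d", ev.Protocol, ev.PeerID, ev.Timestamp)
		ev.Signature = ed25519.Sign(priv, []byte(payload))
		fmt.Printf("sendPlanner %+v\n", ev)
	}
}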
14
main.go
@@ -2,7 +2,6 @@ package main

import (
	"context"
	"fmt"
	"log"
	"oc-discovery/conf"
	"oc-discovery/daemons/node"
@@ -21,34 +20,35 @@ func main() {
	oclib.InitDaemon(appname)
	// get the right config file

	o := oclib.GetConfLoader()
	o := oclib.GetConfLoader(appname)

	conf.GetConfig().Name = o.GetStringDefault("NAME", "opencloud-demo")
	conf.GetConfig().Hostname = o.GetStringDefault("HOSTNAME", "127.0.0.1")
	conf.GetConfig().PSKPath = o.GetStringDefault("PSK_PATH", "./psk/psk.key")
	conf.GetConfig().NodeEndpointPort = o.GetInt64Default("NODE_ENDPOINT_PORT", 4001)
	conf.GetConfig().PublicKeyPath = o.GetStringDefault("PUBLIC_KEY_PATH", "./pem/public.pem")
	conf.GetConfig().PrivateKeyPath = o.GetStringDefault("PRIVATE_KEY_PATH", "./pem/private.pem")
	conf.GetConfig().IndexerAddresses = o.GetStringDefault("INDEXER_ADDRESSES", "")

	conf.GetConfig().NanoIDS = o.GetStringDefault("NANO_IDS", "")
	conf.GetConfig().PeerIDS = o.GetStringDefault("PEER_IDS", "")

	conf.GetConfig().NodeMode = o.GetStringDefault("NODE_MODE", "node")

	conf.GetConfig().MinIndexer = o.GetIntDefault("MIN_INDEXER", 1)
	conf.GetConfig().MaxIndexer = o.GetIntDefault("MAX_INDEXER", 5)
	conf.GetConfig().LocationGranularity = o.GetIntDefault("LOCATION_GRANULARITY", 2)

	ctx, stop := signal.NotifyContext(
		context.Background(),
		os.Interrupt,
		syscall.SIGTERM,
	)
	defer stop()
	fmt.Println(conf.GetConfig().NodeMode)
	isNode := strings.Contains(conf.GetConfig().NodeMode, "node")
	isIndexer := strings.Contains(conf.GetConfig().NodeMode, "indexer")

	if n, err := node.InitNode(isNode, isIndexer); err != nil {
		panic(err)
	} else {
		<-ctx.Done() // 👈 the only blocking point
		<-ctx.Done() // the only blocking point
		log.Println("shutting down")
		n.Close()
	}
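The GetStringDefault/GetInt64Default/GetIntDefault calls in the diff above follow the usual read-env-with-fallback pattern. A self-contained sketch of that pattern; the helper names below are hypothetical stand-ins, not the actual oclib API:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// getStringDefault returns an environment variable's value,
// falling back to def when it is unset or empty.
func getStringDefault(key, def string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return def
}

// getIntDefault is the integer variant: non-numeric or missing
// values fall back to def.
func getIntDefault(key string, def int) int {
	if v, ok := os.LookupEnv(key); ok {
		if n, err := strconv.Atoi(v); err == nil {
			return n
		}
	}
	return def
}

func main() {
	name := getStringDefault("NAME", "opencloud-demo")
	port := getIntDefault("NODE_ENDPOINT_PORT", 4001)
	fmt.Println(name, port)
}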
3
pem/private6.pem
Normal file
@@ -0,0 +1,3 @@
-----BEGIN PRIVATE KEY-----
MC4CAQAwBQYDK2VwBCIEIDo9ZgsqkIxu4Zhk4WY1xa4va1yO3Z6RuXU4K5+amwxE
-----END PRIVATE KEY-----
3
pem/public6.pem
Normal file
@@ -0,0 +1,3 @@
-----BEGIN PUBLIC KEY-----
MCowBQYDK2VwAyEAGp7zNNHz0fgb71WWJUGilpiONKIMk24MjCx6jJ7CTp8=
-----END PUBLIC KEY-----
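Both new pem files hold Ed25519 keys: the private key is PKCS#8, the public key SPKI, and the BQYDK2Vw run in the base64 encodes the Ed25519 OID 1.3.101.112. A minimal sketch of loading the private key with the Go standard library; the path matches the diff, everything else is illustrative:

package main

import (
	"crypto/ed25519"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	raw, err := os.ReadFile("pem/private6.pem")
	if err != nil {
		panic(err)
	}
	// pem.Decode strips the BEGIN/END armor and yields the DER bytes.
	block, _ := pem.Decode(raw)
	if block == nil {
		panic("no PEM block found")
	}
	// PKCS#8 wraps an algorithm identifier around the raw key material.
	key, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		panic(err)
	}
	priv, ok := key.(ed25519.PrivateKey)
	if !ok {
		panic("not an Ed25519 key")
	}
	fmt.Printf("loaded Ed25519 key, public half: %x\n", priv.Public())
}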