183 lines
5.1 KiB
Go
183 lines
5.1 KiB
Go
package common
|
||
|
||
import (
|
||
"context"
|
||
cr "crypto/rand"
|
||
"io"
|
||
"net"
|
||
"slices"
|
||
"time"
|
||
|
||
"github.com/libp2p/go-libp2p/core/host"
|
||
pp "github.com/libp2p/go-libp2p/core/peer"
|
||
)
|
||
|
||
// MaxExpectedMbps is the throughput ceiling (in Mbps) used to normalize a
// measured bandwidth into a 0..1 score in getBandwidthChallengeRate.
const MaxExpectedMbps = 100.0

// MinPayloadChallenge is the minimum bandwidth-challenge payload size in bytes.
// NOTE(review): not referenced in this chunk — presumably used by the challenge
// issuer to pick a random size; confirm against the caller.
const MinPayloadChallenge = 512

// MaxPayloadChallenge is the maximum bandwidth-challenge payload size in bytes.
const MaxPayloadChallenge = 2048

// BaseRoundTrip is the fixed latency allowance added to the size-dependent
// round-trip budget in getBandwidthChallengeRate.
const BaseRoundTrip = 400 * time.Millisecond
|
||
|
||
// UptimeTracker accumulates gap-aware online time for a peer across heartbeats.
type UptimeTracker struct {
	// FirstSeen anchors the tracked lifetime; Uptime and UptimeRatio measure from here.
	FirstSeen time.Time
	// LastSeen is the UTC timestamp of the most recently recorded heartbeat.
	LastSeen time.Time
	// TotalOnline is the sum of heartbeat gaps short enough to count as
	// continuous presence (see RecordHeartbeat).
	TotalOnline time.Duration
}
|
||
|
||
// RecordHeartbeat accumulates online time gap-aware: only counts the interval if
|
||
// the gap since the last heartbeat is within 2× the recommended interval (i.e. no
|
||
// extended outage). Call this each time a heartbeat is successfully processed.
|
||
func (u *UptimeTracker) RecordHeartbeat() {
|
||
now := time.Now().UTC()
|
||
if !u.LastSeen.IsZero() {
|
||
gap := now.Sub(u.LastSeen)
|
||
if gap <= 2*RecommendedHeartbeatInterval {
|
||
u.TotalOnline += gap
|
||
}
|
||
}
|
||
u.LastSeen = now
|
||
}
|
||
|
||
func (u *UptimeTracker) Uptime() time.Duration {
|
||
return time.Since(u.FirstSeen)
|
||
}
|
||
|
||
// UptimeRatio returns the fraction of tracked lifetime during which the peer was
|
||
// continuously online (gap ≤ 2×RecommendedHeartbeatInterval). Returns 0 before
|
||
// the first heartbeat interval has elapsed.
|
||
func (u *UptimeTracker) UptimeRatio() float64 {
|
||
total := time.Since(u.FirstSeen)
|
||
if total <= 0 {
|
||
return 0
|
||
}
|
||
ratio := float64(u.TotalOnline) / float64(total)
|
||
if ratio > 1 {
|
||
ratio = 1
|
||
}
|
||
return ratio
|
||
}
|
||
|
||
func (u *UptimeTracker) IsEligible(min time.Duration) bool {
|
||
return u.Uptime() >= min
|
||
}
|
||
|
||
// getBandwidthChallengeRate opens a dedicated ProtocolBandwidthProbe stream to
|
||
// remotePeer, sends a random payload, reads the echo, and computes throughput
|
||
// and a latency score. Returns (ok, bpms, latencyScore, error).
|
||
// latencyScore is 1.0 when RTT is very fast and 0.0 when at or beyond maxRoundTrip.
|
||
// Using a separate stream avoids mixing binary data on the JSON heartbeat stream
|
||
// and ensures the echo handler is actually running on the remote side.
|
||
func getBandwidthChallengeRate(h host.Host, remotePeer pp.ID, payloadSize int) (bool, float64, float64, error) {
|
||
payload := make([]byte, payloadSize)
|
||
if _, err := cr.Read(payload); err != nil {
|
||
return false, 0, 0, err
|
||
}
|
||
|
||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||
defer cancel()
|
||
s, err := h.NewStream(ctx, remotePeer, ProtocolBandwidthProbe)
|
||
if err != nil {
|
||
return false, 0, 0, err
|
||
}
|
||
defer s.Reset()
|
||
s.SetDeadline(time.Now().Add(10 * time.Second))
|
||
start := time.Now()
|
||
if _, err = s.Write(payload); err != nil {
|
||
return false, 0, 0, err
|
||
}
|
||
s.CloseWrite()
|
||
// Half-close the write side so the handler's io.Copy sees EOF and stops.
|
||
// Read the echo.
|
||
response := make([]byte, payloadSize)
|
||
if _, err = io.ReadFull(s, response); err != nil {
|
||
return false, 0, 0, err
|
||
}
|
||
|
||
duration := time.Since(start)
|
||
maxRoundTrip := BaseRoundTrip + (time.Duration(payloadSize) * (100 * time.Millisecond))
|
||
mbps := float64(payloadSize*8) / duration.Seconds() / 1e6
|
||
|
||
// latencyScore: 1.0 = instant, 0.0 = at maxRoundTrip or beyond.
|
||
latencyScore := 1.0 - float64(duration)/float64(maxRoundTrip)
|
||
if latencyScore < 0 {
|
||
latencyScore = 0
|
||
}
|
||
if latencyScore > 1 {
|
||
latencyScore = 1
|
||
}
|
||
|
||
if duration > maxRoundTrip || mbps < 5.0 {
|
||
return false, float64(mbps / MaxExpectedMbps), latencyScore, nil
|
||
}
|
||
return true, float64(mbps / MaxExpectedMbps), latencyScore, nil
|
||
}
|
||
|
||
func getDiversityRate(h host.Host, peers []string) float64 {
|
||
peers, _ = checkPeers(h, peers)
|
||
diverse := []string{}
|
||
for _, p := range peers {
|
||
ip, err := ExtractIP(p)
|
||
if err != nil {
|
||
continue
|
||
}
|
||
div := ip.Mask(net.CIDRMask(24, 32)).String()
|
||
if !slices.Contains(diverse, div) {
|
||
diverse = append(diverse, div)
|
||
}
|
||
}
|
||
if len(diverse) == 0 || len(peers) == 0 {
|
||
return 1
|
||
}
|
||
return float64(len(diverse)) / float64(len(peers))
|
||
}
|
||
|
||
// getOwnDiversityRate measures subnet /24 diversity of the indexer's own connected peers.
|
||
// This evaluates the indexer's network position rather than the connecting node's topology.
|
||
func getOwnDiversityRate(h host.Host) float64 {
|
||
diverse := map[string]struct{}{}
|
||
total := 0
|
||
for _, pid := range h.Network().Peers() {
|
||
for _, maddr := range h.Peerstore().Addrs(pid) {
|
||
total++
|
||
ip, err := ExtractIP(maddr.String())
|
||
if err != nil {
|
||
continue
|
||
}
|
||
diverse[ip.Mask(net.CIDRMask(24, 32)).String()] = struct{}{}
|
||
}
|
||
}
|
||
if total == 0 {
|
||
return 1
|
||
}
|
||
return float64(len(diverse)) / float64(total)
|
||
}
|
||
|
||
func checkPeers(h host.Host, peers []string) ([]string, []string) {
|
||
concretePeer := []string{}
|
||
ips := []string{}
|
||
for _, p := range peers {
|
||
ad, err := pp.AddrInfoFromString(p)
|
||
if err != nil {
|
||
continue
|
||
}
|
||
if PeerIsAlive(h, *ad) {
|
||
concretePeer = append(concretePeer, p)
|
||
if ip, err := ExtractIP(p); err == nil {
|
||
ips = append(ips, ip.Mask(net.CIDRMask(24, 32)).String())
|
||
}
|
||
}
|
||
}
|
||
return concretePeer, ips
|
||
}
|
||
|
||
// dynamicMinScore returns the minimum acceptable score for a peer, starting
|
||
// permissive (20%) for brand-new peers and hardening linearly to 80% over 24h.
|
||
// This prevents ejecting newcomers in fresh networks while filtering parasites.
|
||
func dynamicMinScore(age time.Duration) float64 {
|
||
hours := age.Hours()
|
||
score := 20.0 + 60.0*(hours/24.0)
|
||
if score > 80.0 {
|
||
score = 80.0
|
||
}
|
||
return score
|
||
}
|