Merge stegasoo (v4.3.0, steganography) and verisoo (v0.1.0, attestation) as subpackages under soosef.stegasoo and soosef.verisoo. This eliminates cross-repo coordination and enables atomic changes across the full stack. - Copy stegasoo (34 modules) and verisoo (15 modules) into src/soosef/ - Convert all verisoo absolute imports to relative imports - Rewire ~50 import sites across soosef code (cli, web, keystore, tests) - Replace stegasoo/verisoo pip deps with inlined code + pip extras (stego-dct, stego-audio, attest, web, api, cli, fieldkit, all, dev) - Add _availability.py for runtime feature detection - Add unified FastAPI mount point at soosef.api - Copy and adapt tests from both repos (155 pass, 1 skip) - Drop standalone CLI/web frontends; keep FastAPI as optional modules - Both source repos tagged pre-monorepo-consolidation on GitHub Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
319 lines
10 KiB
Python
319 lines
10 KiB
Python
"""
Federation and gossip protocol for Verisoo.

Nodes sync their merkle logs via gossip:

1. Periodically exchange merkle roots with peers
2. If roots differ, request consistency proof
3. Fetch missing records and append to local log

Eventually consistent - "eventually" depends on gossip frequency and topology.
No central coordinator, no leader election, no consensus protocol.
Just append-only logs that converge.
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import asyncio
|
|
import hashlib
|
|
import json
|
|
import logging
|
|
from dataclasses import dataclass, field
|
|
from datetime import datetime, timezone
|
|
from typing import Callable, Protocol
|
|
from urllib.parse import urlparse
|
|
|
|
from .exceptions import FederationError
|
|
from .merkle import ConsistencyProof, InclusionProof, MerkleLog
|
|
from .models import AttestationRecord
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
@dataclass
class PeerInfo:
    """Information about a federation peer.

    Mutable sync bookkeeping: GossipNode.sync_with_peer updates last_seen,
    last_root, last_size, healthy and consecutive_failures after each attempt.
    """

    url: str  # peer base URL; also the key in GossipNode.peers
    fingerprint: str  # Ed25519 fingerprint for peer authentication
    last_seen: datetime | None = None  # time of last successful status fetch (UTC)
    last_root: str | None = None  # merkle root reported by the peer at last contact
    last_size: int = 0  # log size reported by the peer at last contact
    healthy: bool = True  # False after repeated failures; unhealthy peers are skipped
    consecutive_failures: int = 0  # reset to 0 on success; peer marked unhealthy at 3
|
|
|
|
|
|
@dataclass
class SyncStatus:
    """Result of a sync attempt with a peer."""

    peer: str  # peer URL the sync was attempted against
    success: bool  # True when the exchange completed without error
    records_received: int = 0  # number of new records appended to our log
    our_size_before: int = 0  # local log size when the sync started
    our_size_after: int = 0  # local log size when the sync finished
    their_size: int = 0  # peer's reported log size (0 if status fetch failed)
    error: str | None = None  # stringified exception when success is False
|
|
|
|
|
|
class PeerTransport(Protocol):
    """Protocol for peer communication.

    Structural interface (typing.Protocol): any object with these four async
    methods can be passed to GossipNode - e.g. HttpTransport below, or an
    in-memory fake for tests.
    """

    async def get_status(self, peer: PeerInfo) -> tuple[str, int]:
        """Get peer's current root hash and log size."""
        ...

    async def get_records(
        self, peer: PeerInfo, start_index: int, count: int
    ) -> list[AttestationRecord]:
        """Fetch records from peer."""
        ...

    async def get_consistency_proof(
        self, peer: PeerInfo, old_size: int
    ) -> ConsistencyProof:
        """Get proof that peer's log is consistent with ours."""
        ...

    async def push_records(
        self, peer: PeerInfo, records: list[AttestationRecord]
    ) -> int:
        """Push records to peer, returns number accepted."""
        ...
|
|
|
|
|
|
class GossipNode:
    """
    A node in the Verisoo federation network.

    Manages:
    - Local merkle log
    - Peer connections
    - Periodic sync via gossip
    """

    def __init__(
        self,
        log: MerkleLog,
        transport: PeerTransport,
        node_id: str | None = None,
    ) -> None:
        """
        Args:
            log: Local append-only merkle log of attestation records.
            transport: Peer communication implementation (see PeerTransport).
            node_id: Stable identifier for this node; a random one is
                generated when omitted.
        """
        self.log = log
        self.transport = transport
        self.node_id = node_id or self._generate_node_id()
        self.peers: dict[str, PeerInfo] = {}  # keyed by peer URL
        self._running = False
        self._sync_task: asyncio.Task | None = None

    def add_peer(self, url: str, fingerprint: str) -> None:
        """Register a peer for gossip (replaces any existing entry for url)."""
        self.peers[url] = PeerInfo(url=url, fingerprint=fingerprint)
        logger.info("Added peer: %s (%s...)", url, fingerprint[:16])

    def remove_peer(self, url: str) -> None:
        """Remove a peer from gossip. No-op for unknown URLs."""
        if url in self.peers:
            del self.peers[url]
            logger.info("Removed peer: %s", url)

    async def start(self, interval_seconds: float = 60.0) -> None:
        """Start the background gossip loop. Idempotent while running."""
        if self._running:
            return

        self._running = True
        self._sync_task = asyncio.create_task(self._gossip_loop(interval_seconds))
        logger.info("Gossip started, interval=%ss", interval_seconds)

    async def stop(self) -> None:
        """Stop the gossip loop and wait for the background task to exit."""
        self._running = False
        if self._sync_task:
            self._sync_task.cancel()
            try:
                await self._sync_task
            except asyncio.CancelledError:
                pass
            self._sync_task = None  # allow a clean restart via start()
        logger.info("Gossip stopped")

    async def sync_with_peer(self, peer_url: str) -> SyncStatus:
        """
        Sync with a specific peer.

        1. Get their status (root, size)
        2. If they have more records, fetch them
        3. Verify consistency before accepting
        4. Append new records to our log

        Always returns a SyncStatus; transport errors and consistency
        failures are reported via ``success=False`` rather than raised.
        """
        peer = self.peers.get(peer_url)
        if not peer:
            return SyncStatus(
                peer=peer_url,
                success=False,
                error="Unknown peer",
            )

        our_size_before = self.log.size

        try:
            # Get peer status
            their_root, their_size = await self.transport.get_status(peer)

            # Update peer info
            peer.last_seen = datetime.now(timezone.utc)
            peer.last_root = their_root
            peer.last_size = their_size

            if their_size <= our_size_before:
                # BUGFIX: this branch previously fell through without a
                # return when the roots differed, yielding None instead of
                # a SyncStatus. Equal-size logs with different roots mean
                # the logs diverged (a fork); a *smaller* peer log merely
                # means the peer is behind us, which is a successful no-op.
                if their_size == our_size_before and their_root != self.log.root_hash:
                    raise FederationError(
                        f"Peer {peer_url} failed consistency check - possible fork"
                    )
                peer.healthy = True
                peer.consecutive_failures = 0
                return SyncStatus(
                    peer=peer_url,
                    success=True,
                    our_size_before=our_size_before,
                    our_size_after=our_size_before,
                    their_size=their_size,
                )

            # They have records we don't - verify consistency first, so a
            # forked peer cannot feed us records that rewrite history.
            if our_size_before > 0:
                proof = await self.transport.get_consistency_proof(
                    peer, our_size_before
                )
                if not self._verify_consistency(proof):
                    raise FederationError(
                        f"Peer {peer_url} failed consistency check - possible fork"
                    )

            # Fetch only the suffix we are missing.
            new_records = await self.transport.get_records(
                peer, our_size_before, their_size - our_size_before
            )

            # Append to our log
            for record in new_records:
                self.log.append(record)

            peer.healthy = True
            peer.consecutive_failures = 0

            return SyncStatus(
                peer=peer_url,
                success=True,
                records_received=their_size - our_size_before,
                our_size_before=our_size_before,
                our_size_after=self.log.size,
                their_size=their_size,
            )

        except Exception as e:  # broad by design: one bad peer must not kill gossip
            peer.consecutive_failures += 1
            if peer.consecutive_failures >= 3:
                peer.healthy = False
                logger.warning("Peer %s marked unhealthy after 3 failures", peer_url)

            return SyncStatus(
                peer=peer_url,
                success=False,
                error=str(e),
                our_size_before=our_size_before,
                our_size_after=self.log.size,
            )

    async def broadcast_record(self, record: AttestationRecord) -> dict[str, bool]:
        """
        Push a new record to all peers.

        Unhealthy peers are skipped (recorded as False).
        Returns dict of peer_url -> success.
        """
        results = {}
        for url, peer in self.peers.items():
            if not peer.healthy:
                results[url] = False
                continue

            try:
                accepted = await self.transport.push_records(peer, [record])
                results[url] = accepted > 0
            except Exception as e:  # best-effort broadcast; log and continue
                logger.warning("Failed to push to %s: %s", url, e)
                results[url] = False

        return results

    async def _gossip_loop(self, interval: float) -> None:
        """Background task that periodically syncs with peers."""
        while self._running:
            try:
                await self._gossip_round()
            except Exception as e:  # keep the loop alive across bad rounds
                logger.error("Gossip round failed: %s", e)

            await asyncio.sleep(interval)

    async def _gossip_round(self) -> None:
        """One round of gossip with all healthy peers."""
        healthy_peers = [p for p in self.peers.values() if p.healthy]
        if not healthy_peers:
            return

        # Sync with all healthy peers concurrently; sync_with_peer reports
        # failures in its return value, but gather defensively as well.
        tasks = [self.sync_with_peer(p.url) for p in healthy_peers]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Log summary
        success_count = sum(
            1 for r in results if isinstance(r, SyncStatus) and r.success
        )
        logger.debug(
            "Gossip round: %s/%s peers synced", success_count, len(healthy_peers)
        )

    def _verify_consistency(self, proof: ConsistencyProof) -> bool:
        """Verify a consistency proof from a peer.

        Simplified: trusts the proof structure for now. A full
        implementation would verify the merkle path (RFC 6962-style
        consistency proof between the two tree sizes).
        """
        return proof.old_size <= self.log.size

    def _generate_node_id(self) -> str:
        """Generate a random 16-hex-char node ID from a CSPRNG."""
        import secrets

        return hashlib.sha256(secrets.token_bytes(32)).hexdigest()[:16]
|
|
|
|
|
|
# Placeholder for HTTP transport implementation
|
|
class HttpTransport:
    """
    HTTP-based peer transport (stub).

    Every method raises NotImplementedError until the aiohttp-backed
    implementation from the ``federation`` extra is installed.

    Endpoints expected on peers:
    - GET /status -> {"root": "...", "size": N}
    - GET /records?start=N&count=M -> [records...]
    - GET /consistency-proof?old_size=N -> proof
    - POST /records -> accept records, return count
    """

    # Shared hint raised by every stub method.
    _NOT_INSTALLED = "Install verisoo[federation] for HTTP transport"

    def __init__(self, timeout: float = 30.0) -> None:
        """Store the per-request timeout (seconds) for the real transport."""
        self.timeout = timeout

    async def get_status(self, peer: PeerInfo) -> tuple[str, int]:
        """Get peer's current root hash and log size."""
        raise NotImplementedError(self._NOT_INSTALLED)

    async def get_records(
        self, peer: PeerInfo, start_index: int, count: int
    ) -> list[AttestationRecord]:
        """Fetch records from peer."""
        raise NotImplementedError(self._NOT_INSTALLED)

    async def get_consistency_proof(
        self, peer: PeerInfo, old_size: int
    ) -> ConsistencyProof:
        """Get proof that peer's log is consistent with ours."""
        raise NotImplementedError(self._NOT_INSTALLED)

    async def push_records(
        self, peer: PeerInfo, records: list[AttestationRecord]
    ) -> int:
        """Push records to peer, returns number accepted."""
        raise NotImplementedError(self._NOT_INSTALLED)
|