indra/pkg/engine/peerstore_test.go
l0k18 26005c9c2e peer metadata store test improved
A problem has come up: the package we are using does not expose the iterator of the underlying KV store, so a wrapper will be made that embeds it and adds this functionality to the exposed back end.
2023-07-21 11:22:36 +01:00
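
A minimal sketch of how such a wrapper could look, assuming the backend store already supports iteration and only the wrapping package hides it; every name below (kvIterator, peerstore, iterablePeerstore) is a hypothetical placeholder rather than the project's actual API:

	// kvIterator is the capability the backend store already has but the
	// wrapping package does not surface.
	type kvIterator interface {
		Iterate(fn func(key, value []byte) bool) error
	}

	// peerstore stands in for the store interface the package exposes today.
	type peerstore interface {
		Get(key []byte) ([]byte, error)
		Put(key, value []byte) error
	}

	// iterablePeerstore embeds the existing store, so its whole API is
	// promoted unchanged, and keeps a reference to the backend it was
	// built on so the iterator can be reached.
	type iterablePeerstore struct {
		peerstore
		backend kvIterator
	}

	// Iterate forwards to the backend's iterator.
	func (s *iterablePeerstore) Iterate(fn func(key, value []byte) bool) error {
		return s.backend.Iterate(fn)
	}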


package engine

import (
	"context"
	"testing"
	"time"

	"github.com/indra-labs/indra"
	log2 "github.com/indra-labs/indra/pkg/proc/log"
)

// pauza sleeps for a second, giving gossip time to propagate between the
// mock engines.
func pauza() {
	time.Sleep(time.Second)
}
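
// NOTE: TestEngine_PeerStore below appears to be parked until the iterator
// wrapper described in the commit note lands: verifying what each ad lands
// as in the peer store needs iteration over the underlying KV store.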
//
// func TestEngine_PeerStore(t *testing.T) {
// 	if indra.CI == "false" {
// 		log2.SetLogLevel(log2.Trace)
// 	}
// 	const nTotal = 10
// 	var e error
// 	var engines []*Engine
// 	var cleanup func()
// 	ctx, _ := context.WithCancel(context.Background())
// 	engines, cleanup, e = CreateAndStartMockEngines(nTotal, ctx)
// 	adz := engines[0].Listener.Host.Addrs()
// 	addrs := make([]*netip.AddrPort, len(adz))
// 	for i := range adz {
// 		addy, _ := multi.AddrToAddrPort(adz[i])
// 		addrs[i] = &addy
// 	}
// 	// To ensure every peer will get the gossip:
// 	pauza()
// 	newAddressAd := addresses.New(nonce.NewID(),
// 		engines[0].Mgr().GetLocalNodeIdentityPrv(),
// 		addrs,
// 		time.Now().Add(time.Hour*24*7))
// 	sa := splice.New(newAddressAd.Len())
// 	if e = newAddressAd.Encode(sa); fails(e) {
// 		t.FailNow()
// 	}
// 	if e = engines[0].SendAd(sa.GetAll()); fails(e) {
// 		t.FailNow()
// 	}
// 	newIntroAd := intro.New(nonce.NewID(),
// 		engines[0].Mgr().GetLocalNodeIdentityPrv(),
// 		engines[0].Mgr().GetLocalNode().Identity.Pub,
// 		20000, 443,
// 		time.Now().Add(time.Hour*24*7))
// 	si := splice.New(newIntroAd.Len())
// 	if e = newIntroAd.Encode(si); fails(e) {
// 		t.FailNow()
// 	}
// 	if e = engines[0].SendAd(si.GetAll()); fails(e) {
// 		t.FailNow()
// 	}
// 	newLoadAd := load.New(nonce.NewID(),
// 		engines[0].Mgr().GetLocalNodeIdentityPrv(),
// 		17,
// 		time.Now().Add(time.Hour*24*7))
// 	sl := splice.New(newLoadAd.Len())
// 	if e = newLoadAd.Encode(sl); fails(e) {
// 		t.FailNow()
// 	}
// 	if e = engines[0].SendAd(sl.GetAll()); fails(e) {
// 		t.FailNow()
// 	}
// 	newPeerAd := peer.New(nonce.NewID(),
// 		engines[0].Mgr().GetLocalNodeIdentityPrv(),
// 		20000,
// 		time.Now().Add(time.Hour*24*7))
// 	sp := splice.New(newPeerAd.Len())
// 	if e = newPeerAd.Encode(sp); fails(e) {
// 		t.FailNow()
// 	}
// 	if e = engines[0].SendAd(sp.GetAll()); fails(e) {
// 		t.FailNow()
// 	}
// 	newServiceAd := services.New(nonce.NewID(),
// 		engines[0].Mgr().GetLocalNodeIdentityPrv(),
// 		[]services.Service{{20000, 54321}, {10000, 42221}},
// 		time.Now().Add(time.Hour*24*7))
// 	ss := splice.New(newServiceAd.Len())
// 	if e = newServiceAd.Encode(ss); fails(e) {
// 		t.FailNow()
// 	}
// 	if e = engines[0].SendAd(ss.GetAll()); fails(e) {
// 		t.FailNow()
// 	}
// 	pauza()
// 	cleanup()
// }

// TestEngine_PeerStoreDiscovery has no precise failure condition for full
// propagation: nodes that start in the second half of the set can miss the
// advertisements the first half sent before they were listening. It still
// fails if the ad-sending functions are broken, since those are the
// conditions that trigger a failure, and it demonstrates that gossip does
// propagate in the first round as nodes start up. More comprehensive tests
// would by nature need far more time, and at least a second update from the
// full set, to saturate every peer with its peers' advertisements.
func TestEngine_PeerStoreDiscovery(t *testing.T) {
	if indra.CI == "false" {
		log2.SetLogLevel(log2.Trace)
	}
	const nTotal = 10
	var e error
	var engines []*Engine
	var cleanup func()
	// Retain and defer the cancel func so the context cannot leak.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	if engines, cleanup, e = CreateAndStartMockEngines(nTotal, ctx); fails(e) {
		t.FailNow()
	}
	time.Sleep(time.Second * 3)
	// Send them all again after a bit to make sure everyone gets them.
	for i := range engines {
		if e = engines[i].SendAds(); fails(e) {
			t.FailNow()
		}
	}
	time.Sleep(time.Second * 3)
	for i := range engines {
		_ = i
		// TODO: check that every peer now holds nTotal-1 distinct peer ads
		// of all 4 types; this needs the iterator over the underlying KV
		// store described in the commit note.
	}
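	// A hypothetical sketch of that check once iteration is exposed; the
	// PeerStore method and Iterate signature below are assumptions, not
	// current API:
	//
	// for i := range engines {
	// 	var count int
	// 	e = engines[i].PeerStore().Iterate(func(k, v []byte) bool {
	// 		count++
	// 		return true
	// 	})
	// 	if fails(e) || count < nTotal-1 {
	// 		t.FailNow()
	// 	}
	// }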
	cleanup()
	pauza()
}