Establish connections between seeds and peers on simnet.

This commit is contained in:
Colin Lyons
2023-01-07 18:34:12 +00:00
parent 0cd9b7ba96
commit 2a0ca20fd2
6 changed files with 182 additions and 38 deletions

View File

@@ -87,7 +87,7 @@ var commands = &cmds.Command{
Label: "listen",
Description: "A list of listener multiaddresses. Example: /ip4/0.0.0.0/tcp/8337",
Documentation: lorem,
Default: "/ip4/127.0.0.1/tcp/8337",
Default: "/ip4/127.0.0.1/tcp/8337,/ip6/::1/tcp/8337",
}, multiAddrSanitizer),
},
Entrypoint: func(c *cmds.Command, args []string) error {

View File

@@ -6,6 +6,9 @@ ARG ARCH=amd64
ARG GOARCH=amd64
ENV GO111MODULE=on GOOS=linux
RUN set -ex \
&& apt update && apt install net-tools
WORKDIR /indra
# ENV defaults

View File

@@ -1,6 +1,8 @@
version: '3'
services:
seed0:
sysctls:
- "net.ipv6.conf.all.disable_ipv6=0"
image: indralabs/indra-dev:latest
container_name: indra-seed0
volumes:
@@ -8,14 +10,52 @@ services:
- ./../../:/indra
networks:
indranet:
#ipv4_address: 172.16.238.2
expose:
- 62134
- 62135
environment:
INDRA_SERVE_KEY: "66T7j5JnhsjDTqVvV8zEM2rTUobu66tocizfqArVEnPJ"
INDRA_SERVE_LISTEN: "/ip4/0.0.0.0/tcp/62134"
INDRA_SERVE_KEY: "66T7j5JnhsjDTqVvV8zEM2rTUobu66tocizfqArVEnP1"
INDRA_SERVE_LISTEN: "/ip4/0.0.0.0/tcp/62134,/ip6/::/tcp/62134"
seed1:
sysctls:
- "net.ipv6.conf.all.disable_ipv6=0"
image: indralabs/indra-dev:latest
container_name: indra-seed1
volumes:
- seed1_gopath:/go
- ./../../:/indra
networks:
indranet:
depends_on:
- seed0
expose:
- 62134
- 62135
environment:
INDRA_SERVE_KEY: "66T7j5JnhsjDTqVvV8zEM2rTUobu66tocizfqArVEnP2"
INDRA_SERVE_LISTEN: "/ip4/0.0.0.0/tcp/62134,/ip6/::/tcp/62134"
seed2:
sysctls:
- "net.ipv6.conf.all.disable_ipv6=0"
image: indralabs/indra-dev:latest
container_name: indra-seed2
volumes:
- seed2_gopath:/go
- ./../../:/indra
networks:
indranet:
depends_on:
- seed0
- seed1
expose:
- 62134
- 62135
environment:
INDRA_SERVE_KEY: "66T7j5JnhsjDTqVvV8zEM2rTUobu66tocizfqArVEnP3"
INDRA_SERVE_LISTEN: "/ip4/0.0.0.0/tcp/62134,/ip6/::/tcp/62134"
peer0:
sysctls:
- "net.ipv6.conf.all.disable_ipv6=0"
image: indralabs/indra-dev:latest
container_name: indra-peer0
volumes:
@@ -23,15 +63,19 @@ services:
- ./../../:/indra
networks:
indranet:
#ipv4_address: 172.16.238.3
depends_on:
- seed0
- seed1
- seed2
expose:
- 62134
- 62135
environment:
INDRA_SERVE_SEED: "/dns4/seed0/tcp/62134/p2p/16Uiu2HAm2LgowPNBM47dR6gSJmEeQaqCZ6u4WPhTCSWkxyNrfAxo"
#INDRA_SERVE_SEED: "/dns4/seed0/tcp/62134/p2p/16Uiu2HAm2LgowPNBM47dR6gSJmEeQaqCZ6u4WPhTCSWkxyNrfAxo"
INDRA_SERVE_LISTEN: "/ip4/0.0.0.0/tcp/62134,/ip6/::/tcp/62134"
peer1:
sysctls:
- "net.ipv6.conf.all.disable_ipv6=0"
image: indralabs/indra-dev:latest
container_name: indra-peer1
volumes:
@@ -39,26 +83,55 @@ services:
- ./../../:/indra
networks:
indranet:
#ipv4_address: 172.16.238.4
depends_on:
- seed0
- seed1
- seed2
expose:
- 62134
- 62135
environment:
INDRA_SERVE_SEED: "/dns4/seed0/tcp/62134/p2p/16Uiu2HAm2LgowPNBM47dR6gSJmEeQaqCZ6u4WPhTCSWkxyNrfAxo"
#INDRA_SERVE_SEED: "/dns4/seed0/tcp/62134/p2p/16Uiu2HAm2LgowPNBM47dR6gSJmEeQaqCZ6u4WPhTCSWkxyNrfAxo"
INDRA_SERVE_LISTEN: "/ip4/0.0.0.0/tcp/62134,/ip6/::/tcp/62134"
peer2:
  sysctls:
    - "net.ipv6.conf.all.disable_ipv6=0"
  image: indralabs/indra-dev:latest
  container_name: indra-peer2
  volumes:
    # BUG FIX: this previously mounted peer1_gopath, colliding with the
    # peer1 service's Go module cache and leaving the declared
    # peer2_gopath volume unused.
    - peer2_gopath:/go
    - ./../../:/indra
  networks:
    indranet:
  depends_on:
    - seed0
    - seed1
    - seed2
  #expose:
  #  - 62134
  #  - 62135
  environment:
    #INDRA_SERVE_SEED: "/dns4/seed0/tcp/62134/p2p/16Uiu2HAm2LgowPNBM47dR6gSJmEeQaqCZ6u4WPhTCSWkxyNrfAxo"
    # NOTE(review): peer2 listens on loopback only, unlike the other
    # services which bind 0.0.0.0 / :: — confirm this is intentional.
    INDRA_SERVE_LISTEN: "/ip4/127.0.0.1/tcp/62134,/ip6/::1/tcp/62134"
volumes:
seed0_gopath:
seed1_gopath:
seed2_gopath:
peer0_gopath:
peer1_gopath:
peer2_gopath:
networks:
indranet:
driver: bridge
driver_opts:
com.docker.network.enable_ipv6: "true"
ipam:
driver: default
config:
- subnet: 172.16.238.0/24
gateway: 172.16.238.1
- subnet: 2001:3984:3989::/64
gateway: 2001:3984:3989::1
# docker build -t indralabs/indra-dev -f docker/indra/Dockerfile.dev .

View File

@@ -2,4 +2,4 @@
go mod tidy
go run ./cmd/indra/. serve
IPFS_LOGGING=info go run ./cmd/indra/. -lcl serve

View File

@@ -94,5 +94,9 @@ var SimnetServerParams = &Params{
DefaultPort: "62134",
// Should be passed via --seed
DNSSeedAddresses: []*DNSSeedAddress{},
DNSSeedAddresses: []*DNSSeedAddress{
NewSeedAddress("seed0", "16Uiu2HAmCxWoKp4vs7xrmzbScHEhUK7trCgCPhKPZRBiUvSxS7xA"),
NewSeedAddress("seed1", "16Uiu2HAmTKk6BvJFPmcQ6q92XgvQ4ZPu1AVjQxMvCfM4you9Zyvc"),
NewSeedAddress("seed2", "16Uiu2HAm8tCAW7D9WFLxkda52R73nSk9yBCFW8uwA4MZPzHYVhnW"),
},
}

View File

@@ -12,6 +12,7 @@ import (
"github.com/libp2p/go-libp2p/core/peer"
"github.com/multiformats/go-multiaddr"
"sync"
"time"
)
var (
@@ -60,34 +61,66 @@ func (srv *Server) Shutdown() (err error) {
return nil
}
func (srv *Server) Serve() (err error) {
func seedConnect(ctx context.Context, attempts int) {
log.I.Ln("bootstrapping the DHT")
}
// Bootstrap the DHT. In the default configuration, this spawns a Background
// thread that will refresh the peer table every five minutes.
if err = srv.dht.Bootstrap(srv.Context); check(err) {
return err
// peer_metrics logs the host's current connected-peer count every ten
// seconds until quitChan is closed (or yields a value).
//
// BUG FIX: the original used a bare `break` inside the select, which
// only exits the select statement — never the enclosing for loop — so
// the goroutine could not be stopped and leaked after shutdown. Using
// a ticker and an explicit `return` fixes that, and also lets the loop
// react to quitChan immediately instead of only after a 10-second
// time.Sleep completes.
func peer_metrics(host host.Host, quitChan <-chan struct{}) {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		log.I.Ln("peers:", len(host.Network().Peers()))
		select {
		case <-quitChan:
			return
		case <-ticker.C:
		}
	}
}
log.I.Ln("attempting to peer with seed addresses...")
func (srv *Server) attempt(ctx context.Context, peer *peer.AddrInfo, attempts_left int, wg sync.WaitGroup) {
// We will first attempt to connect to the seed addresses.
var wg sync.WaitGroup
log.I.Ln("attempting connection to", peer.ID)
var seedAddresses []multiaddr.Multiaddr
defer wg.Done()
if err := srv.host.Connect(srv.Context, *peer); check(err) {
log.E.Ln("connection attempt failed to", peer.ID)
attempts_left--
if attempts_left <= 0 {
return
}
time.Sleep(10 * time.Second)
srv.attempt(ctx, peer, attempts_left, wg)
if seedAddresses, err = srv.params.ParseSeedMultiAddresses(); check(err) {
return
}
seedAddresses = append(seedAddresses, srv.config.SeedAddresses...)
log.I.Ln("connection established with seed node:", peer.ID)
log.I.Ln("seed peers:")
ctx.Done()
}
func (srv *Server) seed_connect() (err error) {
log.I.Ln("attempting to peer with seed addresses...")
// We will first attempt to seed_connect to the seed addresses.
var wg sync.WaitGroup
var peerInfo *peer.AddrInfo
for _, peerAddr := range seedAddresses {
for _, peerAddr := range srv.config.SeedAddresses {
log.I.Ln("-", peerAddr.String())
@@ -95,22 +128,42 @@ func (srv *Server) Serve() (err error) {
return
}
if peerInfo.ID == srv.host.ID() {
log.I.Ln("attempting to seed_connect to self, skipping...")
continue
}
wg.Add(1)
go func() {
ctx, cancel := context.WithTimeout(context.Background(), 30 * time.Second)
defer cancel()
defer wg.Done()
if err := srv.host.Connect(srv.Context, *peerInfo); check(err) {
return
}
log.I.Ln("connection established with seed node:", peerInfo.ID)
}()
go srv.attempt(ctx, peerInfo, 3, wg)
}
wg.Wait()
return
}
func (srv *Server) Serve() (err error) {
go peer_metrics(srv.host, srv.Context.Done())
//log.I.Ln("bootstrapping the DHT")
// Bootstrap the DHT. In the default configuration, this spawns a Background
// thread that will refresh the peer table every five minutes.
if err = srv.dht.Bootstrap(srv.Context); check(err) {
return err
}
if err = srv.seed_connect(); check(err) {
return
}
select {
case <-srv.Context.Done():
srv.Shutdown()
@@ -139,19 +192,30 @@ func New(params *cfg.Params, config *Config) (srv *Server, err error) {
return nil, err
}
log.I.Ln("host id:")
log.I.Ln("-", s.host.ID())
log.I.Ln("p2p listeners:")
log.I.Ln("-", s.host.Addrs())
log.I.Ln("host id:")
log.I.Ln("-", s.host.ID())
var seedAddresses []multiaddr.Multiaddr
if seedAddresses, err = params.ParseSeedMultiAddresses(); check(err) {
return
}
config.SeedAddresses = append(config.SeedAddresses, seedAddresses...)
log.I.Ln("seed addresses:")
log.I.Ln("-", config.SeedAddresses)
// Start a DHT, for use in peer discovery. We can't just make a new DHT
// client because we want each peer to maintain its own local copy of the
// DHT, so that the bootstrapping node of the DHT can go down without
// inhibiting future peer discovery.
if s.dht, err = dht.New(s.Context, s.host); check(err) {
return nil, err
}
//if s.dht, err = dht.New(s.Context, s.host); check(err) {
// return nil, err
//}
return &s, err
}