general: lint clean, vet clean
parent f17ad3e24d
commit 0c02ac9980
7 changed files with 36 additions and 15 deletions
@@ -11,7 +11,10 @@ import (
 	"github.com/chihaya/chihaya/pkg/event"
 )
 
+// PeerID represents a peer ID.
 type PeerID string
 
+// InfoHash represents an infohash in hexadecimal notation.
 type InfoHash string
+
 // AnnounceRequest represents the parsed parameters from an announce request.
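The added comments are golint fixes: golint expects every exported identifier to carry a doc comment that begins with the identifier's name. A one-line sketch of the convention, using a hypothetical type that is not in this diff:

// SwarmKey identifies a swarm. golint accepts this because the comment
// starts with "SwarmKey", and would warn if it started with anything else.
type SwarmKey string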
@@ -40,6 +40,9 @@ type httpServer struct {
 	grace *graceful.Server
 }
 
+// Start runs the server and blocks until it has exited.
+//
+// It panics if the server exits unexpectedly.
 func (s *httpServer) Start() {
 	s.grace = &graceful.Server{
 		Server: &http.Server{
@@ -81,6 +84,7 @@ func (s *httpServer) Start() {
 	log.Println("HTTP server shut down cleanly")
 }
 
+// Stop stops the server and blocks until the server has exited.
 func (s *httpServer) Stop() {
 	s.grace.Stop(s.grace.Timeout)
 	<-s.grace.StopChan()
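For context on the Start/Stop contract these comments document, here is a minimal sketch of the graceful start/stop pattern, assuming the graceful package is github.com/tylerb/graceful (its import is outside this hunk) and a hypothetical listen address:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/tylerb/graceful"
)

func main() {
	srv := &graceful.Server{
		Timeout: 5 * time.Second, // drain window for in-flight requests
		Server:  &http.Server{Addr: ":6880", Handler: http.DefaultServeMux},
	}

	// Simulate an external Stop call, as httpServer.Stop does above.
	go func() {
		time.Sleep(time.Second)
		srv.Stop(srv.Timeout)
	}()

	// ListenAndServe blocks until the server exits. A graceful stop can
	// surface as a benign "accept" error on the closed listener, so real
	// callers inspect the error before deciding to panic.
	if err := srv.ListenAndServe(); err != nil {
		log.Println("server exited:", err)
	}

	<-srv.StopChan() // closed once shutdown has fully completed
}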
@@ -71,6 +71,9 @@ type Server struct {
 
 var _ server.Server = &Server{}
 
+// Start starts the prometheus server and blocks until it exits.
+//
+// It panics if the server exits unexpectedly.
 func (s *Server) Start() {
 	s.grace = &graceful.Server{
 		Server: &http.Server{
@@ -93,6 +96,7 @@ func (s *Server) Start() {
 	log.Println("Prometheus server shut down cleanly")
 }
 
+// Stop stops the prometheus server and blocks until it exits.
 func (s *Server) Stop() {
 	s.grace.Stop(s.cfg.ShutdownTimeout)
 	<-s.grace.StopChan()
@@ -46,6 +46,11 @@ func New(cfg *chihaya.ServerConfig, tkr *tracker.Tracker) (Server, error) {
 
 // Server represents one instance of a server accessing the tracker.
 type Server interface {
+	// Start starts a server and blocks until the server exits.
+	//
+	// It should panic if the server exits unexpectedly.
 	Start()
+
+	// Stop stops a server and blocks until the server exits.
 	Stop()
 }
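A minimal sketch of a type satisfying this interface under the documented contract; the type, its fields, and the constructor are hypothetical:

// noopServer blocks in Start until Stop is called, and Stop blocks
// until Start has returned, matching the comments added above.
type noopServer struct {
	shutdown chan struct{} // closed by Stop
	done     chan struct{} // closed when Start returns
}

var _ Server = (*noopServer)(nil)

func newNoopServer() *noopServer {
	return &noopServer{
		shutdown: make(chan struct{}),
		done:     make(chan struct{}),
	}
}

// Start starts the server and blocks until the server exits.
func (s *noopServer) Start() {
	defer close(s.done)
	<-s.shutdown
}

// Stop stops the server and blocks until the server exits.
func (s *noopServer) Stop() {
	close(s.shutdown)
	<-s.done
}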
@@ -50,8 +50,8 @@ func TestPeerStoreAPI(t *testing.T) {
 			1,
 		}
 		config = store.DriverConfig{
-			"memory",
-			unmarshalledConfig,
+			Name:   "memory",
+			Config: unmarshalledConfig,
 		}
 		d = &peerStoreDriver{}
 	)
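The keyed fields are the vet fix: go vet flags unkeyed composite literals for struct types imported from another package, because adding or reordering fields silently changes what each positional value means. A sketch of the two forms, with the values from the test:

// Unkeyed: go vet reports the literal (roughly "composite literal uses
// unkeyed fields"), and a later field reorder would break it silently.
config := store.DriverConfig{"memory", unmarshalledConfig}

// Keyed: order-independent and self-documenting.
config = store.DriverConfig{
	Name:   "memory",
	Config: unmarshalledConfig,
}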
@@ -62,9 +62,9 @@ func TestPeerStoreAPI(t *testing.T) {
 	for _, p := range peers {
 		// Construct chihaya.Peer from test data.
 		peer := chihaya.Peer{
-			chihaya.PeerID(p.peerID),
-			net.ParseIP(p.ip),
-			p.port,
+			ID:   chihaya.PeerID(p.peerID),
+			IP:   net.ParseIP(p.ip),
+			Port: p.port,
 		}
 
 		if p.seeder {
@@ -95,9 +95,9 @@ func TestPeerStoreAPI(t *testing.T) {
 	for _, p := range peers {
 		// Construct chihaya.Peer from test data.
 		peer := chihaya.Peer{
-			chihaya.PeerID(p.peerID),
-			net.ParseIP(p.ip),
-			p.port,
+			ID:   chihaya.PeerID(p.peerID),
+			IP:   net.ParseIP(p.ip),
+			Port: p.port,
 		}
 
 		if p.seeder {
@@ -121,9 +121,9 @@ func TestPeerStoreAPI(t *testing.T) {
 	for _, p := range peers {
 		// Construct chihaya.Peer from test data.
 		peer := chihaya.Peer{
-			chihaya.PeerID(p.peerID),
-			net.ParseIP(p.ip),
-			p.port,
+			ID:   chihaya.PeerID(p.peerID),
+			IP:   net.ParseIP(p.ip),
+			Port: p.port,
 		}
 		if p.seeder {
 			s.PutSeeder(hash, peer)
@@ -136,9 +136,9 @@ func TestPeerStoreAPI(t *testing.T) {
 	assert.Equal(t, 6, s.NumSeeders(hash))
 	assert.Equal(t, 4, s.NumLeechers(hash))
 	peer := chihaya.Peer{
-		chihaya.PeerID(peers[0].peerID),
-		net.ParseIP(peers[0].ip),
-		peers[0].port,
+		ID:   chihaya.PeerID(peers[0].peerID),
+		IP:   net.ParseIP(peers[0].ip),
+		Port: peers[0].port,
 	}
 	err = s.GraduateLeecher(hash, peer)
 	assert.Nil(t, err)
@@ -57,6 +57,7 @@ func constructor(srvcfg *chihaya.ServerConfig, tkr *tracker.Tracker) (server.Ser
 	return theStore, nil
 }
 
+// Config represents the configuration for the store.
 type Config struct {
 	Addr           string        `yaml:"addr"`
 	RequestTimeout time.Duration `yaml:"request_timeout"`
@@ -68,6 +69,7 @@ type Config struct {
 	StringStore DriverConfig `yaml:"string_store"`
 }
 
+// DriverConfig represents the configuration for a store driver.
 type DriverConfig struct {
 	Name   string      `yaml:"name"`
 	Config interface{} `yaml:"config"`
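For reference, a sketch of decoding a DriverConfig from YAML. It assumes gopkg.in/yaml.v2 and an invented config snippet, since the decoding code is not part of this diff:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

// DriverConfig mirrors the struct in the hunk above.
type DriverConfig struct {
	Name   string      `yaml:"name"`
	Config interface{} `yaml:"config"`
}

func main() {
	// "shards: 1" is a hypothetical driver option.
	raw := []byte("name: memory\nconfig:\n  shards: 1\n")

	var cfg DriverConfig
	if err := yaml.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}

	// Config comes back as a generic map; drivers re-decode or cast it.
	fmt.Println(cfg.Name, cfg.Config)
}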
@@ -99,6 +101,7 @@ func MustGetStore() *Store {
 	return theStore
 }
 
+// Store provides storage for a tracker.
 type Store struct {
 	cfg *Config
 	tkr *tracker.Tracker
@@ -110,12 +113,14 @@ type Store struct {
 	StringStore
 }
 
+// Start starts the store drivers and blocks until all of them exit.
 func (s *Store) Start() {
 	<-s.shutdown
 	s.wg.Wait()
 	log.Println("Store server shut down cleanly")
 }
 
+// Stop stops the store drivers and waits for them to exit.
 func (s *Store) Stop() {
 	close(s.shutdown)
 	s.wg.Wait()
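Start and Stop above use the close-to-broadcast idiom: closing s.shutdown unblocks the <-s.shutdown in Start and every other goroutine waiting on the channel, while the WaitGroup makes both calls block until the drivers wind down. A minimal sketch with a hypothetical worker loop (imports elided; needs sync):

type store struct {
	shutdown chan struct{}  // closed exactly once, by Stop
	wg       sync.WaitGroup // tracks running driver goroutines
}

// run launches a hypothetical worker that exits once shutdown is closed.
func (s *store) run(work func()) {
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for {
			select {
			case <-s.shutdown:
				return // a closed channel releases every waiter at once
			default:
				work()
			}
		}
	}()
}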
@@ -16,7 +16,7 @@ type StringStore interface {
 }
 
 // StringStoreDriver represents an interface for creating a handle to the
-// storage of swarms.
+// storage of strings.
 type StringStoreDriver interface {
 	New(*DriverConfig) (StringStore, error)
 }