Merge pull request #329 from jzelinskie/debug-jwt

Fix panic on binary start
Author: Jimmy Zelinskie, 2017-06-06 12:18:20 -04:00 (committed by GitHub)
Commit: 6c3ddaefb3
4 changed files with 93 additions and 77 deletions
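The diff does two things: it adds debug-level logging throughout the JWT middleware's validateJWT path, and it moves the storage Prometheus metrics into the shared storage package. The start-up panic being fixed is most plausibly duplicate metric registration: before this change, the memory and memorybysubnet drivers each declared metrics with the same fully-qualified names and called prometheus.MustRegister in their own init functions, and MustRegister panics when the same name is registered twice. A minimal standalone sketch (not code from this repository) showing that behaviour:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Two independently declared collectors that share a fully-qualified
	// metric name, as the memory and memorybysubnet drivers did before
	// this change.
	a := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "chihaya_storage_infohashes_count",
		Help: "The number of Infohashes tracked",
	})
	b := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "chihaya_storage_infohashes_count",
		Help: "The number of Infohashes tracked",
	})

	prometheus.MustRegister(a) // succeeds
	prometheus.MustRegister(b) // panics: duplicate metrics collector registration
}

Consolidating the collectors into one package-level init, as the new storage/prometheus.go below does, registers each name exactly once regardless of how many drivers are linked into the binary.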

@@ -155,28 +155,58 @@ func validateJWT(ih bittorrent.InfoHash, jwtBytes []byte, cfgIss, cfgAud string,
claims := parsedJWT.Claims()
if iss, ok := claims.Issuer(); !ok || iss != cfgIss {
log.WithFields(log.Fields{
"exists": ok,
"claim": iss,
"config": cfgIss,
}).Debugln("unequal or missing issuer when validating JWT")
return jwt.ErrInvalidISSClaim
}
if aud, ok := claims.Audience(); !ok || !validAudience(aud, cfgAud) {
log.WithFields(log.Fields{
"exists": ok,
"claim": aud,
"config": cfgAud,
}).Debugln("unequal or missing audience when validating JWT")
return jwt.ErrInvalidAUDClaim
}
if ihClaim, ok := claims.Get("infohash").(string); !ok || !validInfoHash(ihClaim, ih) {
log.WithFields(log.Fields{
"exists": ok,
"request": ih,
"claim": ihClaim,
}).Debugln("unequal or missing infohash when validating JWT")
return errors.New("claim \"infohash\" is invalid")
}
parsedJWS := parsedJWT.(jws.JWS)
kid, ok := parsedJWS.Protected().Get("kid").(string)
if !ok {
log.WithFields(log.Fields{
"exists": ok,
"claim": kid,
}).Debugln("missing kid when validating JWT")
return errors.New("invalid kid")
}
publicKey, ok := publicKeys[kid]
if !ok {
log.WithFields(log.Fields{
"kid": kid,
}).Debugln("missing public key for kid when validating JWT")
return errors.New("signed by unknown kid")
}
return parsedJWS.Verify(publicKey, jc.SigningMethodRS256)
err = parsedJWS.Verify(publicKey, jc.SigningMethodRS256)
if err != nil {
log.WithFields(log.Fields{
"err": err,
}).Debugln("failed to verify signature of JWT")
return err
}
return nil
}
func validAudience(aud []string, cfgAud string) bool {
@@ -188,6 +218,8 @@ func validAudience(aud []string, cfgAud string) bool {
return false
}
// validInfoHash attempts to match the claim for the Infohash field of a JWT by
// checking both the raw and unescaped forms of the contents of the field.
func validInfoHash(claim string, ih bittorrent.InfoHash) bool {
if len(claim) == 20 && bittorrent.InfoHashFromString(claim) == ih {
return true

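The hunk above is cut off inside validInfoHash; the new comment explains that the claim is accepted in either its raw or its unescaped form. A minimal sketch of that check, assuming net/url's QueryUnescape handles the unescaping step (that helper choice is an assumption, not taken from this diff):

// Sketch only: accept the claim if either its raw bytes or its
// percent-unescaped bytes match the announced infohash.
func validInfoHash(claim string, ih bittorrent.InfoHash) bool {
	if len(claim) == 20 && bittorrent.InfoHashFromString(claim) == ih {
		return true
	}
	if unescaped, err := url.QueryUnescape(claim); err == nil && len(unescaped) == 20 {
		return bittorrent.InfoHashFromString(unescaped) == ih
	}
	return false
}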

@@ -12,7 +12,6 @@ import (
"time"
log "github.com/Sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
@@ -23,44 +22,10 @@ import (
const Name = "memory"
func init() {
// Register Prometheus metrics.
prometheus.MustRegister(
promGCDurationMilliseconds,
promInfohashesCount,
promSeedersCount,
promLeechersCount,
)
// Register the storage driver.
storage.RegisterDriver(Name, driver{})
}
var promGCDurationMilliseconds = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "chihaya_storage_gc_duration_milliseconds",
Help: "The time it takes to perform storage garbage collection",
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
})
var promInfohashesCount = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "chihaya_storage_infohashes_count",
Help: "The number of Infohashes tracked",
})
var promSeedersCount = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "chihaya_storage_seeders_count",
Help: "The number of seeders tracked",
})
var promLeechersCount = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "chihaya_storage_leechers_count",
Help: "The number of leechers tracked",
})
// recordGCDuration records the duration of a GC sweep.
func recordGCDuration(duration time.Duration) {
promGCDurationMilliseconds.Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}
type driver struct{}
func (d driver) NewPeerStore(icfg interface{}) (storage.PeerStore, error) {
@@ -272,9 +237,14 @@ func (ps *peerStore) populateProm() {
s.RUnlock()
}
promInfohashesCount.Set(float64(numInfohashes))
promSeedersCount.Set(float64(numSeeders))
promLeechersCount.Set(float64(numLeechers))
storage.PromInfohashesCount.Set(float64(numInfohashes))
storage.PromSeedersCount.Set(float64(numSeeders))
storage.PromLeechersCount.Set(float64(numLeechers))
}
// recordGCDuration records the duration of a GC sweep.
func recordGCDuration(duration time.Duration) {
storage.PromGCDurationMilliseconds.Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}
func (ps *peerStore) getClock() int64 {

@@ -13,7 +13,6 @@ import (
"time"
log "github.com/Sirupsen/logrus"
"github.com/prometheus/client_golang/prometheus"
"gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
@@ -24,44 +23,10 @@ import (
const Name = "memorybysubnet"
func init() {
// Register Prometheus metrics.
prometheus.MustRegister(
promGCDurationMilliseconds,
promInfohashesCount,
promSeedersCount,
promLeechersCount,
)
// Register the storage driver.
storage.RegisterDriver(Name, driver{})
}
var promGCDurationMilliseconds = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "chihaya_storage_gc_duration_milliseconds",
Help: "The time it takes to perform storage garbage collection",
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
})
var promInfohashesCount = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "chihaya_storage_infohashes_count",
Help: "The number of Infohashes tracked",
})
var promSeedersCount = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "chihaya_storage_seeders_count",
Help: "The number of seeders tracked",
})
var promLeechersCount = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "chihaya_storage_leechers_count",
Help: "The number of leechers tracked",
})
// recordGCDuration records the duration of a GC sweep.
func recordGCDuration(duration time.Duration) {
promGCDurationMilliseconds.Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}
type driver struct{}
func (d driver) NewPeerStore(icfg interface{}) (storage.PeerStore, error) {
@@ -311,9 +276,14 @@ func (ps *peerStore) populateProm() {
s.RUnlock()
}
promInfohashesCount.Set(float64(numInfohashes))
promSeedersCount.Set(float64(numSeeders))
promLeechersCount.Set(float64(numLeechers))
storage.PromInfohashesCount.Set(float64(numInfohashes))
storage.PromSeedersCount.Set(float64(numSeeders))
storage.PromLeechersCount.Set(float64(numLeechers))
}
// recordGCDuration records the duration of a GC sweep.
func recordGCDuration(duration time.Duration) {
storage.PromGCDurationMilliseconds.Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}
func (ps *peerStore) getClock() int64 {

storage/prometheus.go (new file, 44 lines added)

@@ -0,0 +1,44 @@
package storage
import "github.com/prometheus/client_golang/prometheus"
func init() {
// Register the metrics.
prometheus.MustRegister(
PromGCDurationMilliseconds,
PromInfohashesCount,
PromSeedersCount,
PromLeechersCount,
)
}
var (
// PromGCDurationMilliseconds is a histogram used by storage to record the
// durations of execution time required for removing expired peers.
PromGCDurationMilliseconds = prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "chihaya_storage_gc_duration_milliseconds",
Help: "The time it takes to perform storage garbage collection",
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
})
// PromInfohashesCount is a gauge used to hold the current total amount of
// unique swarms being tracked by a storage.
PromInfohashesCount = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "chihaya_storage_infohashes_count",
Help: "The number of Infohashes tracked",
})
// PromSeedersCount is a gauge used to hold the current total amount of
// unique seeders per swarm.
PromSeedersCount = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "chihaya_storage_seeders_count",
Help: "The number of seeders tracked",
})
// PromLeechersCount is a gauge used to hold the current total amount of
// unique leechers per swarm.
PromLeechersCount = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "chihaya_storage_leechers_count",
Help: "The number of leechers tracked",
})
)
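
With the collectors exported from package storage and registered once in its init, a driver only updates the shared metrics; it never registers anything itself. A hypothetical driver snippet (the gcExpiredPeers helper and the count variables are illustrative, not from this diff; the storage.Prom* names are the ones defined above):

// Hypothetical GC pass inside a storage driver.
start := time.Now()
numInfohashes, numSeeders, numLeechers := gcExpiredPeers() // illustrative helper

storage.PromGCDurationMilliseconds.Observe(
	float64(time.Since(start).Nanoseconds()) / float64(time.Millisecond))
storage.PromInfohashesCount.Set(float64(numInfohashes))
storage.PromSeedersCount.Set(float64(numSeeders))
storage.PromLeechersCount.Set(float64(numLeechers))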