From ad955d73dbd19cc1559bb86b314b7eae7482484e Mon Sep 17 00:00:00 2001 From: Jimmy Zelinskie Date: Thu, 23 Oct 2014 00:59:50 -0400 Subject: [PATCH] Vendor third party dependencies. --- Godeps/_workspace/.gitignore | 2 - .../p/go.net/netutil/listen.go | 48 + .../p/go.net/netutil/listen_test.go | 103 ++ .../src/github.com/chihaya/bencode/.gitignore | 23 + .../github.com/chihaya/bencode/.travis.yml | 11 + .../src/github.com/chihaya/bencode/AUTHORS | 5 + .../src/github.com/chihaya/bencode/LICENSE | 25 + .../src/github.com/chihaya/bencode/README.md | 10 + .../src/github.com/chihaya/bencode/bencode.go | 23 + .../src/github.com/chihaya/bencode/decoder.go | 135 +++ .../chihaya/bencode/decoder_test.go | 89 ++ .../src/github.com/chihaya/bencode/encoder.go | 151 +++ .../chihaya/bencode/encoder_test.go | 70 ++ .../src/github.com/golang/glog/LICENSE | 191 +++ .../src/github.com/golang/glog/README | 44 + .../src/github.com/golang/glog/glog.go | 1034 +++++++++++++++++ .../src/github.com/golang/glog/glog_file.go | 124 ++ .../src/github.com/golang/glog/glog_test.go | 333 ++++++ .../julienschmidt/httprouter/.travis.yml | 6 + .../julienschmidt/httprouter/LICENSE | 24 + .../julienschmidt/httprouter/README.md | 234 ++++ .../julienschmidt/httprouter/path.go | 123 ++ .../julienschmidt/httprouter/path_test.go | 92 ++ .../julienschmidt/httprouter/router.go | 317 +++++ .../julienschmidt/httprouter/router_test.go | 329 ++++++ .../julienschmidt/httprouter/tree.go | 534 +++++++++ .../julienschmidt/httprouter/tree_test.go | 559 +++++++++ .../github.com/pushrax/faststats/.travis.yml | 6 + .../src/github.com/pushrax/faststats/AUTHORS | 4 + .../src/github.com/pushrax/faststats/LICENSE | 24 + .../github.com/pushrax/faststats/README.md | 5 + .../github.com/pushrax/faststats/faststats.go | 10 + .../src/github.com/pushrax/faststats/json.go | 11 + .../pushrax/faststats/percentile.go | 101 ++ .../pushrax/faststats/percentile_test.go | 77 ++ .../src/github.com/pushrax/faststats/util.go | 40 + .../github.com/pushrax/flatjson/.travis.yml | 6 + .../src/github.com/pushrax/flatjson/AUTHORS | 4 + .../src/github.com/pushrax/flatjson/LICENSE | 24 + .../src/github.com/pushrax/flatjson/README.md | 68 ++ .../github.com/pushrax/flatjson/flatjson.go | 104 ++ .../pushrax/flatjson/flatjson_test.go | 143 +++ .../github.com/stretchr/graceful/.gitignore | 23 + .../src/github.com/stretchr/graceful/LICENSE | 21 + .../github.com/stretchr/graceful/README.md | 114 ++ .../github.com/stretchr/graceful/graceful.go | 272 +++++ .../stretchr/graceful/graceful_test.go | 322 +++++ .../stretchr/graceful/tests/main.go | 40 + .../github.com/stretchr/graceful/wercker.yml | 1 + .../src/github.com/stretchr/pat/stop/doc.go | 46 + .../src/github.com/stretchr/pat/stop/stop.go | 57 + .../github.com/stretchr/pat/stop/stop_test.go | 76 ++ 52 files changed, 6236 insertions(+), 2 deletions(-) delete mode 100644 Godeps/_workspace/.gitignore create mode 100644 Godeps/_workspace/src/code.google.com/p/go.net/netutil/listen.go create mode 100644 Godeps/_workspace/src/code.google.com/p/go.net/netutil/listen_test.go create mode 100644 Godeps/_workspace/src/github.com/chihaya/bencode/.gitignore create mode 100644 Godeps/_workspace/src/github.com/chihaya/bencode/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/chihaya/bencode/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/chihaya/bencode/LICENSE create mode 100644 Godeps/_workspace/src/github.com/chihaya/bencode/README.md create mode 100644 Godeps/_workspace/src/github.com/chihaya/bencode/bencode.go 
create mode 100644 Godeps/_workspace/src/github.com/chihaya/bencode/decoder.go create mode 100644 Godeps/_workspace/src/github.com/chihaya/bencode/decoder_test.go create mode 100644 Godeps/_workspace/src/github.com/chihaya/bencode/encoder.go create mode 100644 Godeps/_workspace/src/github.com/chihaya/bencode/encoder_test.go create mode 100644 Godeps/_workspace/src/github.com/golang/glog/LICENSE create mode 100644 Godeps/_workspace/src/github.com/golang/glog/README create mode 100644 Godeps/_workspace/src/github.com/golang/glog/glog.go create mode 100644 Godeps/_workspace/src/github.com/golang/glog/glog_file.go create mode 100644 Godeps/_workspace/src/github.com/golang/glog/glog_test.go create mode 100644 Godeps/_workspace/src/github.com/julienschmidt/httprouter/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/julienschmidt/httprouter/LICENSE create mode 100644 Godeps/_workspace/src/github.com/julienschmidt/httprouter/README.md create mode 100644 Godeps/_workspace/src/github.com/julienschmidt/httprouter/path.go create mode 100644 Godeps/_workspace/src/github.com/julienschmidt/httprouter/path_test.go create mode 100644 Godeps/_workspace/src/github.com/julienschmidt/httprouter/router.go create mode 100644 Godeps/_workspace/src/github.com/julienschmidt/httprouter/router_test.go create mode 100644 Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree.go create mode 100644 Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree_test.go create mode 100644 Godeps/_workspace/src/github.com/pushrax/faststats/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/pushrax/faststats/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/pushrax/faststats/LICENSE create mode 100644 Godeps/_workspace/src/github.com/pushrax/faststats/README.md create mode 100644 Godeps/_workspace/src/github.com/pushrax/faststats/faststats.go create mode 100644 Godeps/_workspace/src/github.com/pushrax/faststats/json.go create mode 100644 Godeps/_workspace/src/github.com/pushrax/faststats/percentile.go create mode 100644 Godeps/_workspace/src/github.com/pushrax/faststats/percentile_test.go create mode 100644 Godeps/_workspace/src/github.com/pushrax/faststats/util.go create mode 100644 Godeps/_workspace/src/github.com/pushrax/flatjson/.travis.yml create mode 100644 Godeps/_workspace/src/github.com/pushrax/flatjson/AUTHORS create mode 100644 Godeps/_workspace/src/github.com/pushrax/flatjson/LICENSE create mode 100644 Godeps/_workspace/src/github.com/pushrax/flatjson/README.md create mode 100644 Godeps/_workspace/src/github.com/pushrax/flatjson/flatjson.go create mode 100644 Godeps/_workspace/src/github.com/pushrax/flatjson/flatjson_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/graceful/.gitignore create mode 100644 Godeps/_workspace/src/github.com/stretchr/graceful/LICENSE create mode 100644 Godeps/_workspace/src/github.com/stretchr/graceful/README.md create mode 100644 Godeps/_workspace/src/github.com/stretchr/graceful/graceful.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/graceful/graceful_test.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/graceful/tests/main.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/graceful/wercker.yml create mode 100644 Godeps/_workspace/src/github.com/stretchr/pat/stop/doc.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/pat/stop/stop.go create mode 100644 Godeps/_workspace/src/github.com/stretchr/pat/stop/stop_test.go diff --git a/Godeps/_workspace/.gitignore 
b/Godeps/_workspace/.gitignore deleted file mode 100644 index f037d68..0000000 --- a/Godeps/_workspace/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/pkg -/bin diff --git a/Godeps/_workspace/src/code.google.com/p/go.net/netutil/listen.go b/Godeps/_workspace/src/code.google.com/p/go.net/netutil/listen.go new file mode 100644 index 0000000..a2591f8 --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.net/netutil/listen.go @@ -0,0 +1,48 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package netutil provides network utility functions, complementing the more +// common ones in the net package. +package netutil + +import ( + "net" + "sync" +) + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. +func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} diff --git a/Godeps/_workspace/src/code.google.com/p/go.net/netutil/listen_test.go b/Godeps/_workspace/src/code.google.com/p/go.net/netutil/listen_test.go new file mode 100644 index 0000000..ac87e0e --- /dev/null +++ b/Godeps/_workspace/src/code.google.com/p/go.net/netutil/listen_test.go @@ -0,0 +1,103 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.3 + +// (We only run this test on Go 1.3 because the HTTP client timeout behavior +// was bad in previous releases, causing occasional deadlocks.) + +package netutil + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "sync" + "sync/atomic" + "testing" + "time" +) + +func TestLimitListener(t *testing.T) { + const ( + max = 5 + num = 200 + ) + + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("Listen: %v", err) + } + defer l.Close() + l = LimitListener(l, max) + + var open int32 + go http.Serve(l, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if n := atomic.AddInt32(&open, 1); n > max { + t.Errorf("%d open connections, want <= %d", n, max) + } + defer atomic.AddInt32(&open, -1) + time.Sleep(10 * time.Millisecond) + fmt.Fprint(w, "some body") + })) + + var wg sync.WaitGroup + var failed int32 + for i := 0; i < num; i++ { + wg.Add(1) + go func() { + defer wg.Done() + c := http.Client{Timeout: 3 * time.Second} + r, err := c.Get("http://" + l.Addr().String()) + if err != nil { + t.Logf("Get: %v", err) + atomic.AddInt32(&failed, 1) + return + } + defer r.Body.Close() + io.Copy(ioutil.Discard, r.Body) + }() + } + wg.Wait() + + // We expect some Gets to fail as the kernel's accept queue is filled, + // but most should succeed. 
+ if failed >= num/2 { + t.Errorf("too many Gets failed: %v", failed) + } +} + +type errorListener struct { + net.Listener +} + +func (errorListener) Accept() (net.Conn, error) { + return nil, errFake +} + +var errFake = errors.New("fake error from errorListener") + +// This used to hang. +func TestLimitListenerError(t *testing.T) { + donec := make(chan bool, 1) + go func() { + const n = 2 + ll := LimitListener(errorListener{}, n) + for i := 0; i < n+1; i++ { + _, err := ll.Accept() + if err != errFake { + t.Fatalf("Accept error = %v; want errFake", err) + } + } + donec <- true + }() + select { + case <-donec: + case <-time.After(5 * time.Second): + t.Fatal("timeout. deadlock?") + } +} diff --git a/Godeps/_workspace/src/github.com/chihaya/bencode/.gitignore b/Godeps/_workspace/src/github.com/chihaya/bencode/.gitignore new file mode 100644 index 0000000..8365624 --- /dev/null +++ b/Godeps/_workspace/src/github.com/chihaya/bencode/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/Godeps/_workspace/src/github.com/chihaya/bencode/.travis.yml b/Godeps/_workspace/src/github.com/chihaya/bencode/.travis.yml new file mode 100644 index 0000000..39f0f17 --- /dev/null +++ b/Godeps/_workspace/src/github.com/chihaya/bencode/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: 1.3 + +notifications: + irc: + channels: + - "irc.freenode.net#chihaya" + on_success: always + on_failure: always + email: false diff --git a/Godeps/_workspace/src/github.com/chihaya/bencode/AUTHORS b/Godeps/_workspace/src/github.com/chihaya/bencode/AUTHORS new file mode 100644 index 0000000..b62effb --- /dev/null +++ b/Godeps/_workspace/src/github.com/chihaya/bencode/AUTHORS @@ -0,0 +1,5 @@ +# This is the official list of Chihaya authors for copyright purposes, in alphabetical order. + +Jimmy Zelinskie +Justin Li + diff --git a/Godeps/_workspace/src/github.com/chihaya/bencode/LICENSE b/Godeps/_workspace/src/github.com/chihaya/bencode/LICENSE new file mode 100644 index 0000000..126b004 --- /dev/null +++ b/Godeps/_workspace/src/github.com/chihaya/bencode/LICENSE @@ -0,0 +1,25 @@ +bencode is released under a BSD 2-Clause license, reproduced below. + +Copyright (c) 2014, The Chihaya Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/chihaya/bencode/README.md b/Godeps/_workspace/src/github.com/chihaya/bencode/README.md new file mode 100644 index 0000000..0e6645f --- /dev/null +++ b/Godeps/_workspace/src/github.com/chihaya/bencode/README.md @@ -0,0 +1,10 @@ +# bencode [![Build Status](https://api.travis-ci.org/chihaya/bencode.svg?branch=master)](https://travis-ci.org/chihaya/bencode) + +Package bencode implements bencoding of data as defined in [BEP 3][]. +It uses type assertion over reflection for performance. + +[BEP 3]: http://www.bittorrent.org/beps/bep_0003.html + +## Documentation + +Refer to the [GoDoc](http://godoc.org/github.com/chihaya/bencode). diff --git a/Godeps/_workspace/src/github.com/chihaya/bencode/bencode.go b/Godeps/_workspace/src/github.com/chihaya/bencode/bencode.go new file mode 100644 index 0000000..c020d10 --- /dev/null +++ b/Godeps/_workspace/src/github.com/chihaya/bencode/bencode.go @@ -0,0 +1,23 @@ +// Copyright 2014 The Chihaya Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +// Package bencode implements bencoding of data as defined in BEP 3 using +// type assertion over reflection for performance. +package bencode + +// Dict represents a bencode dictionary. +type Dict map[string]interface{} + +// NewDict allocates the memory for a Dict. +func NewDict() Dict { + return make(Dict) +} + +// List represents a bencode list. +type List []interface{} + +// NewList allocates the memory for a List. +func NewList() List { + return make(List, 0) +} diff --git a/Godeps/_workspace/src/github.com/chihaya/bencode/decoder.go b/Godeps/_workspace/src/github.com/chihaya/bencode/decoder.go new file mode 100644 index 0000000..13481e1 --- /dev/null +++ b/Godeps/_workspace/src/github.com/chihaya/bencode/decoder.go @@ -0,0 +1,135 @@ +// Copyright 2014 The Chihaya Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +package bencode + +import ( + "bufio" + "bytes" + "errors" + "io" + "strconv" +) + +// A Decoder reads bencoded objects from an input stream. +type Decoder struct { + r *bufio.Reader +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: bufio.NewReader(r)} +} + +// Decode unmarshals the next bencoded value in the stream. +func (dec *Decoder) Decode() (interface{}, error) { + return unmarshal(dec.r) +} + +// Unmarshal deserializes and returns the bencoded value in buf. 
+func Unmarshal(buf []byte) (interface{}, error) { + r := bufio.NewReader(bytes.NewBuffer(buf)) + return unmarshal(r) +} + +// unmarshal reads bencoded values from a bufio.Reader +func unmarshal(r *bufio.Reader) (interface{}, error) { + tok, err := r.ReadByte() + if err != nil { + return nil, err + } + + switch tok { + case 'i': + return readTerminatedInt(r, 'e') + + case 'l': + list := NewList() + for { + ok, err := readTerminator(r, 'e') + if err != nil { + return nil, err + } else if ok { + break + } + + v, err := unmarshal(r) + if err != nil { + return nil, err + } + list = append(list, v) + } + return list, nil + + case 'd': + dict := NewDict() + for { + ok, err := readTerminator(r, 'e') + if err != nil { + return nil, err + } else if ok { + break + } + + v, err := unmarshal(r) + if err != nil { + return nil, err + } + + key, ok := v.(string) + if !ok { + return nil, errors.New("bencode: non-string map key") + } + + dict[key], err = unmarshal(r) + if err != nil { + return nil, err + } + } + return dict, nil + + default: + err = r.UnreadByte() + if err != nil { + return nil, err + } + + length, err := readTerminatedInt(r, ':') + if err != nil { + return nil, errors.New("bencode: unknown input sequence") + } + + buf := make([]byte, length) + n, err := r.Read(buf) + + if err != nil { + return nil, err + } else if int64(n) != length { + return nil, errors.New("bencode: short read") + } + + return string(buf), nil + } +} + +func readTerminator(r *bufio.Reader, term byte) (bool, error) { + tok, err := r.ReadByte() + if err != nil { + return false, err + } else if tok == term { + return true, nil + } + return false, r.UnreadByte() +} + +func readTerminatedInt(r *bufio.Reader, term byte) (int64, error) { + buf, err := r.ReadSlice(term) + if err != nil { + return 0, err + } else if len(buf) <= 1 { + return 0, errors.New("bencode: empty integer field") + } + + return strconv.ParseInt(string(buf[:len(buf)-1]), 10, 64) +} diff --git a/Godeps/_workspace/src/github.com/chihaya/bencode/decoder_test.go b/Godeps/_workspace/src/github.com/chihaya/bencode/decoder_test.go new file mode 100644 index 0000000..2ad47ae --- /dev/null +++ b/Godeps/_workspace/src/github.com/chihaya/bencode/decoder_test.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Chihaya Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. 
+ +package bencode + +import ( + "reflect" + "testing" +) + +var unmarshalTests = []struct { + input string + expected interface{} +}{ + {"i42e", int64(42)}, + {"i-42e", int64(-42)}, + + {"7:example", "example"}, + + {"l3:one3:twoe", List{"one", "two"}}, + {"le", List{}}, + + {"d3:one2:aa3:two2:bbe", Dict{"one": "aa", "two": "bb"}}, + {"de", Dict{}}, +} + +func TestUnmarshal(t *testing.T) { + for _, test := range unmarshalTests { + got, err := Unmarshal([]byte(test.input)) + if err != nil { + t.Error(err) + } else if !reflect.DeepEqual(got, test.expected) { + t.Errorf("\ngot: %#v\nexpected: %#v", got, test.expected) + } + } +} + +type bufferLoop struct { + val string +} + +func (r *bufferLoop) Read(b []byte) (int, error) { + n := copy(b, r.val) + return n, nil +} + +func BenchmarkUnmarshalScalar(b *testing.B) { + d1 := NewDecoder(&bufferLoop{"7:example"}) + d2 := NewDecoder(&bufferLoop{"i42e"}) + + for i := 0; i < b.N; i++ { + d1.Decode() + d2.Decode() + } +} + +func TestUnmarshalLarge(t *testing.T) { + data := Dict{ + "k1": List{"a", "b", "c"}, + "k2": int64(42), + "k3": "val", + "k4": int64(-42), + } + buf, _ := Marshal(data) + dec := NewDecoder(&bufferLoop{string(buf)}) + + got, err := dec.Decode() + if err != nil { + t.Error(err) + } else if !reflect.DeepEqual(got, data) { + t.Errorf("\ngot: %#v\nexpected: %#v", got, data) + } +} + +func BenchmarkUnmarshalLarge(b *testing.B) { + data := map[string]interface{}{ + "k1": []string{"a", "b", "c"}, + "k2": 42, + "k3": "val", + "k4": uint(42), + } + buf, _ := Marshal(data) + dec := NewDecoder(&bufferLoop{string(buf)}) + + for i := 0; i < b.N; i++ { + dec.Decode() + } +} diff --git a/Godeps/_workspace/src/github.com/chihaya/bencode/encoder.go b/Godeps/_workspace/src/github.com/chihaya/bencode/encoder.go new file mode 100644 index 0000000..324d503 --- /dev/null +++ b/Godeps/_workspace/src/github.com/chihaya/bencode/encoder.go @@ -0,0 +1,151 @@ +// Copyright 2014 The Chihaya Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +package bencode + +import ( + "bytes" + "fmt" + "io" + "strconv" + "time" +) + +// An Encoder writes bencoded objects to an output stream. +type Encoder struct { + w io.Writer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: w} +} + +// Encode writes the bencoding of v to the stream. +func (enc *Encoder) Encode(v interface{}) error { + return marshal(enc.w, v) +} + +// Marshal returns the bencoding of v. +func Marshal(v interface{}) ([]byte, error) { + buf := &bytes.Buffer{} + err := marshal(buf, v) + return buf.Bytes(), err +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves. 
+type Marshaler interface { + MarshalBencode() ([]byte, error) +} + +// marshal writes types bencoded to an io.Writer +func marshal(w io.Writer, data interface{}) error { + switch v := data.(type) { + case Marshaler: + bencoded, err := v.MarshalBencode() + if err != nil { + return err + } + _, err = w.Write(bencoded) + if err != nil { + return err + } + + case string: + marshalString(w, v) + + case int: + marshalInt(w, int64(v)) + + case uint: + marshalUint(w, uint64(v)) + + case int64: + marshalInt(w, v) + + case uint64: + marshalUint(w, v) + + case []byte: + marshalBytes(w, v) + + case time.Duration: // Assume seconds + marshalInt(w, int64(v/time.Second)) + + case Dict: + marshal(w, map[string]interface{}(v)) + + case []Dict: + w.Write([]byte{'l'}) + for _, val := range v { + err := marshal(w, val) + if err != nil { + return err + } + } + w.Write([]byte{'e'}) + + case map[string]interface{}: + w.Write([]byte{'d'}) + for key, val := range v { + marshalString(w, key) + err := marshal(w, val) + if err != nil { + return err + } + } + w.Write([]byte{'e'}) + + case []string: + w.Write([]byte{'l'}) + for _, val := range v { + err := marshal(w, val) + if err != nil { + return err + } + } + w.Write([]byte{'e'}) + + case List: + marshal(w, []interface{}(v)) + + case []interface{}: + w.Write([]byte{'l'}) + for _, val := range v { + err := marshal(w, val) + if err != nil { + return err + } + } + w.Write([]byte{'e'}) + + default: + return fmt.Errorf("attempted to marshal unsupported type:\n%t", v) + } + + return nil +} + +func marshalInt(w io.Writer, v int64) { + w.Write([]byte{'i'}) + w.Write([]byte(strconv.FormatInt(v, 10))) + w.Write([]byte{'e'}) +} + +func marshalUint(w io.Writer, v uint64) { + w.Write([]byte{'i'}) + w.Write([]byte(strconv.FormatUint(v, 10))) + w.Write([]byte{'e'}) +} + +func marshalBytes(w io.Writer, v []byte) { + w.Write([]byte(strconv.Itoa(len(v)))) + w.Write([]byte{':'}) + w.Write(v) +} + +func marshalString(w io.Writer, v string) { + marshalBytes(w, []byte(v)) +} diff --git a/Godeps/_workspace/src/github.com/chihaya/bencode/encoder_test.go b/Godeps/_workspace/src/github.com/chihaya/bencode/encoder_test.go new file mode 100644 index 0000000..f665600 --- /dev/null +++ b/Godeps/_workspace/src/github.com/chihaya/bencode/encoder_test.go @@ -0,0 +1,70 @@ +// Copyright 2014 The Chihaya Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. 
+ +package bencode + +import ( + "bytes" + "testing" + "time" +) + +var marshalTests = []struct { + input interface{} + expected string +}{ + {int(42), "i42e"}, + {int(-42), "i-42e"}, + {uint(43), "i43e"}, + {int64(44), "i44e"}, + {uint64(45), "i45e"}, + + {"example", "7:example"}, + {[]byte("example"), "7:example"}, + {30 * time.Minute, "i1800e"}, + + {[]string{"one", "two"}, "l3:one3:twoe"}, + {[]interface{}{"one", "two"}, "l3:one3:twoe"}, + {[]string{}, "le"}, + + {map[string]interface{}{"one": "aa", "two": "bb"}, "d3:one2:aa3:two2:bbe"}, + {map[string]interface{}{}, "de"}, +} + +func TestMarshal(t *testing.T) { + for _, test := range marshalTests { + got, err := Marshal(test.input) + if err != nil { + t.Error(err) + } else if string(got) != test.expected { + t.Errorf("\ngot: %s\nexpected: %s", got, test.expected) + } + } +} + +func BenchmarkMarshalScalar(b *testing.B) { + buf := &bytes.Buffer{} + encoder := NewEncoder(buf) + + for i := 0; i < b.N; i++ { + encoder.Encode("test") + encoder.Encode(123) + } +} + +func BenchmarkMarshalLarge(b *testing.B) { + data := map[string]interface{}{ + "k1": []string{"a", "b", "c"}, + "k2": 42, + "k3": "val", + "k4": uint(42), + } + + buf := &bytes.Buffer{} + encoder := NewEncoder(buf) + + for i := 0; i < b.N; i++ { + encoder.Encode(data) + } +} diff --git a/Godeps/_workspace/src/github.com/golang/glog/LICENSE b/Godeps/_workspace/src/github.com/golang/glog/LICENSE new file mode 100644 index 0000000..37ec93a --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. 
For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Godeps/_workspace/src/github.com/golang/glog/README b/Godeps/_workspace/src/github.com/golang/glog/README new file mode 100644 index 0000000..5f9c114 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/glog/README @@ -0,0 +1,44 @@ +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + http://code.google.com/p/google-glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. 
+ + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog.go b/Godeps/_workspace/src/github.com/golang/glog/glog.go new file mode 100644 index 0000000..d5e1ac2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/glog/glog.go @@ -0,0 +1,1034 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. 
+// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package glog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. 
+func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `*?[]\`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. 
+// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. + t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +func init() { + flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&logging.verbosity, "v", "log level for V logs") + flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + // Default stderrThreshold is ERROR. + logging.stderrThreshold = errorLog + + logging.setVState(0, nil, false) + go logging.flushDaemon() +} + +// Flush flushes all pending log I/O. +func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. + mu sync.Mutex + // file holds writer for each of the log types. + file [numSeverity]flushSyncWriter + // pcs is used in V to avoid an allocation when computing the caller's PC. 
+ pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 + // traceLocation is the state of the -log_backtrace_at flag. + traceLocation traceLocation + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity Level // V logging level, the value of the -v flag/ +} + +// buffer holds a byte Buffer for reuse. The zero value is ready for use. +type buffer struct { + bytes.Buffer + tmp [64]byte // temporary byte array for creating headers. + next *buffer +} + +var logging loggingT + +// setVState sets a consistent state for V logging. +// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + logging.filterLength = 0 + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. +func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity) *buffer { + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + now := timeNow() + _, file, line, ok := runtime.Caller(3) // It's always the same number of frames to the user's call. + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. 
+ _, month, day := now.Date() + hour, minute, second := now.Clock() + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000) + buf.tmp[21] = ' ' + buf.nDigits(5, 22, pid) // TODO: should be TID + buf.tmp[27] = ' ' + buf.Write(buf.tmp[:28]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats a zero-prefixed n-digit integer at buf.tmp[i]. +func (buf *buffer) nDigits(n, i, d int) { + for j := n - 1; j >= 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf := l.header(s) + fmt.Fprintln(buf, args...) + l.output(s, buf) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + buf := l.header(s) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf := l.header(s) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf) +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer) { + l.mu.Lock() + if l.traceLocation.isSet() { + _, file, line, ok := runtime.Caller(3) // It's always the same number of frames to the user's call (same as header). + if ok && l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if l.toStderr { + os.Stderr.Write(data) + } else { + if l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // Make sure we see the trace for the current goroutine on standard error. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. 
+ f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. 
+ var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for _ = range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if glog.V(2) { glog.Info("log this") } +// or +// glog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. 
If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) 
+} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog_file.go b/Godeps/_workspace/src/github.com/golang/glog/glog_file.go new file mode 100644 index 0000000..65075d2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/glog/glog_file.go @@ -0,0 +1,124 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "errors" + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +// If non-empty, overrides the choice of directory in which to write logs. +// See createLogDirs for the full list of possible destinations. +var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". 
+func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +func create(tag string, t time.Time) (f *os.File, filename string, err error) { + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} diff --git a/Godeps/_workspace/src/github.com/golang/glog/glog_test.go b/Godeps/_workspace/src/github.com/golang/glog/glog_test.go new file mode 100644 index 0000000..e4cac5a --- /dev/null +++ b/Godeps/_workspace/src/github.com/golang/glog/glog_test.go @@ -0,0 +1,333 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package glog + +import ( + "bytes" + "fmt" + "path/filepath" + "runtime" + "strings" + "testing" + "time" +) + +// Test that shortHostname works as advertised. +func TestShortHostname(t *testing.T) { + for hostname, expect := range map[string]string{ + "": "", + "host": "host", + "host.google.com": "host", + } { + if got := shortHostname(hostname); expect != got { + t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got) + } + } +} + +// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter. +type flushBuffer struct { + bytes.Buffer +} + +func (f *flushBuffer) Flush() error { + return nil +} + +func (f *flushBuffer) Sync() error { + return nil +} + +// swap sets the log writers and returns the old array. +func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) { + l.mu.Lock() + defer l.mu.Unlock() + old = l.file + for i, w := range writers { + logging.file[i] = w + } + return +} + +// newBuffers sets the log writers to all new byte buffers and returns the old array. 
+func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter { + return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)}) +} + +// contents returns the specified log value as a string. +func contents(s severity) string { + return logging.file[s].(*flushBuffer).String() +} + +// contains reports whether the string is contained in the log. +func contains(s severity, str string, t *testing.T) bool { + return strings.Contains(contents(s), str) +} + +// setFlags configures the logging flags how the test expects them. +func setFlags() { + logging.toStderr = false +} + +// Test that Info works as advertised. +func TestInfo(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + Info("test") + if !contains(infoLog, "I", t) { + t.Errorf("Info has wrong character: %q", contents(infoLog)) + } + if !contains(infoLog, "test", t) { + t.Error("Info failed") + } +} + +// Test that the header has the correct format. +func TestHeader(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + defer func(previous func() time.Time) { timeNow = previous }(timeNow) + timeNow = func() time.Time { + return time.Date(2006, 1, 2, 15, 4, 5, .678901e9, time.Local) + } + Info("test") + var line, pid int + n, err := fmt.Sscanf(contents(infoLog), "I0102 15:04:05.678901 %d glog_test.go:%d] test\n", &pid, &line) + if n != 2 || err != nil { + t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) + } +} + +// Test that an Error log goes to Warning and Info. +// Even in the Info log, the source character will be E, so the data should +// all be identical. +func TestError(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + Error("test") + if !contains(errorLog, "E", t) { + t.Errorf("Error has wrong character: %q", contents(errorLog)) + } + if !contains(errorLog, "test", t) { + t.Error("Error failed") + } + str := contents(errorLog) + if !contains(warningLog, str, t) { + t.Error("Warning failed") + } + if !contains(infoLog, str, t) { + t.Error("Info failed") + } +} + +// Test that a Warning log goes to Info. +// Even in the Info log, the source character will be W, so the data should +// all be identical. +func TestWarning(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + Warning("test") + if !contains(warningLog, "W", t) { + t.Errorf("Warning has wrong character: %q", contents(warningLog)) + } + if !contains(warningLog, "test", t) { + t.Error("Warning failed") + } + str := contents(warningLog) + if !contains(infoLog, str, t) { + t.Error("Info failed") + } +} + +// Test that a V log goes to Info. +func TestV(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + logging.verbosity.Set("2") + defer logging.verbosity.Set("0") + V(2).Info("test") + if !contains(infoLog, "I", t) { + t.Errorf("Info has wrong character: %q", contents(infoLog)) + } + if !contains(infoLog, "test", t) { + t.Error("Info failed") + } +} + +// Test that a vmodule enables a log in this file. 
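+// For reference (an editor's illustration, not part of the upstream file): the
+// -vmodule flag value is a comma-separated list of pattern=N pairs, where each
+// pattern is matched against a source file's basename with the .go suffix
+// stripped, e.g.
+//   -vmodule=glog_test=2      enables V(1) and V(2) calls in this file
+//   -vmodule=gopher*=3,tree=1 sets per-pattern verbosity levels
+// The tests below set the equivalent values programmatically via
+// logging.vmodule.Set.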
+func TestVmoduleOn(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + logging.vmodule.Set("glog_test=2") + defer logging.vmodule.Set("") + if !V(1) { + t.Error("V not enabled for 1") + } + if !V(2) { + t.Error("V not enabled for 2") + } + if V(3) { + t.Error("V enabled for 3") + } + V(2).Info("test") + if !contains(infoLog, "I", t) { + t.Errorf("Info has wrong character: %q", contents(infoLog)) + } + if !contains(infoLog, "test", t) { + t.Error("Info failed") + } +} + +// Test that a vmodule of another file does not enable a log in this file. +func TestVmoduleOff(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + logging.vmodule.Set("notthisfile=2") + defer logging.vmodule.Set("") + for i := 1; i <= 3; i++ { + if V(Level(i)) { + t.Errorf("V enabled for %d", i) + } + } + V(2).Info("test") + if contents(infoLog) != "" { + t.Error("V logged incorrectly") + } +} + +// vGlobs are patterns that match/don't match this file at V=2. +var vGlobs = map[string]bool{ + // Easy to test the numeric match here. + "glog_test=1": false, // If -vmodule sets V to 1, V(2) will fail. + "glog_test=2": true, + "glog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed. + // These all use 2 and check the patterns. All are true. + "*=2": true, + "?l*=2": true, + "????_*=2": true, + "??[mno]?_*t=2": true, + // These all use 2 and check the patterns. All are false. + "*x=2": false, + "m*=2": false, + "??_*=2": false, + "?[abc]?_*t=2": false, +} + +// Test that vmodule globbing works as advertised. +func testVmoduleGlob(pat string, match bool, t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + defer logging.vmodule.Set("") + logging.vmodule.Set(pat) + if V(2) != Verbose(match) { + t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match) + } +} + +// Test that a vmodule globbing works as advertised. +func TestVmoduleGlob(t *testing.T) { + for glob, match := range vGlobs { + testVmoduleGlob(glob, match, t) + } +} + +func TestRollover(t *testing.T) { + setFlags() + var err error + defer func(previous func(error)) { logExitFunc = previous }(logExitFunc) + logExitFunc = func(e error) { + err = e + } + defer func(previous uint64) { MaxSize = previous }(MaxSize) + MaxSize = 512 + + Info("x") // Be sure we have a file. + info, ok := logging.file[infoLog].(*syncBuffer) + if !ok { + t.Fatal("info wasn't created") + } + if err != nil { + t.Fatalf("info has initial error: %v", err) + } + fname0 := info.file.Name() + Info(strings.Repeat("x", int(MaxSize))) // force a rollover + if err != nil { + t.Fatalf("info has error after big write: %v", err) + } + + // Make sure the next log file gets a file name with a different + // time stamp. + // + // TODO: determine whether we need to support subsecond log + // rotation. C++ does not appear to handle this case (nor does it + // handle Daylight Savings Time properly). + time.Sleep(1 * time.Second) + + Info("x") // create a new file + if err != nil { + t.Fatalf("error after rotation: %v", err) + } + fname1 := info.file.Name() + if fname0 == fname1 { + t.Errorf("info.f.Name did not change: %v", fname0) + } + if info.nbytes >= MaxSize { + t.Errorf("file size was not reset: %d", info.nbytes) + } +} + +func TestLogBacktraceAt(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + // The peculiar style of this code simplifies line counting and maintenance of the + // tracing block below. 
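+	// (Editor's note, illustrative) -log_backtrace_at takes a single file:line
+	// value such as glog_test.go:100; when a logging statement at exactly that
+	// location executes, glog appends a stack trace to the emitted record,
+	// which is what this test asserts on.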
+ var infoLine string + setTraceLocation := func(file string, line int, ok bool, delta int) { + if !ok { + t.Fatal("could not get file:line") + } + _, file = filepath.Split(file) + infoLine = fmt.Sprintf("%s:%d", file, line+delta) + err := logging.traceLocation.Set(infoLine) + if err != nil { + t.Fatal("error setting log_backtrace_at: ", err) + } + } + { + // Start of tracing block. These lines know about each other's relative position. + _, file, line, ok := runtime.Caller(0) + setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls. + Info("we want a stack trace here") + } + numAppearances := strings.Count(contents(infoLog), infoLine) + if numAppearances < 2 { + // Need 2 appearances, one in the log header and one in the trace: + // log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here + // ... + // github.com/glog/glog_test.go:280 (0x41ba91) + // ... + // We could be more precise but that would require knowing the details + // of the traceback format, which may not be dependable. + t.Fatal("got no trace back; log is ", contents(infoLog)) + } +} + +func BenchmarkHeader(b *testing.B) { + for i := 0; i < b.N; i++ { + logging.putBuffer(logging.header(infoLog)) + } +} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/.travis.yml b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/.travis.yml new file mode 100644 index 0000000..b0cf782 --- /dev/null +++ b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/.travis.yml @@ -0,0 +1,6 @@ +language: go +go: + - 1.1 + - 1.2 + - 1.3 + - tip diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/LICENSE b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/LICENSE new file mode 100644 index 0000000..b829abc --- /dev/null +++ b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2013 Julien Schmidt. All rights reserved. + + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * The names of the contributors may not be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL JULIEN SCHMIDT BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/README.md b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/README.md new file mode 100644 index 0000000..082e4cf --- /dev/null +++ b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/README.md @@ -0,0 +1,234 @@ +# HttpRouter [![Build Status](https://travis-ci.org/julienschmidt/httprouter.png?branch=master)](https://travis-ci.org/julienschmidt/httprouter) [![GoDoc](http://godoc.org/github.com/julienschmidt/httprouter?status.png)](http://godoc.org/github.com/julienschmidt/httprouter) + +HttpRouter is a lightweight high performance HTTP request router +(also called *multiplexer* or just *mux* for short) for [Go](http://golang.org/). + +In contrast to the default mux of Go's net/http package, this router supports +variables in the routing pattern and matches against the request method. +It also scales better. + +The router is optimized for best performance and a small memory footprint. +It scales well even with very long paths and a large number of routes. +A compressing dynamic trie (radix tree) structure is used for efficient matching. + +## Features +**Zero Garbage:** The matching and dispatching process generates zero bytes of +garbage. In fact, the only heap allocations that are made, is by building the +slice of the key-value pairs for path parameters. If the request path contains +no parameters, not a single heap allocation is necessary. + +**Best Performance:** [Benchmarks speak for themselves](https://github.com/julienschmidt/go-http-routing-benchmark). +See below for technical details of the implementation. + +**Parameters in your routing pattern:** Stop parsing the requested URL path, +just give the path segment a name and the router delivers the dynamic value to +you. Because of the design of the router, path parameters are very cheap. + +**Only explicit matches:** With other routers, like [http.ServeMux](http://golang.org/pkg/net/http/#ServeMux), +a requested URL path could match multiple patterns. Therefore they have some +awkward pattern priority rules, like *longest match* or *first registered, +first matched*. By design of this router, a request can only match exactly one +or no route. As a result, there are also no unintended matches, which makes it +great for SEO and improves the user experience. + +**Stop caring about trailing slashes:** Choose the URL style you like, the +router automatically redirects the client if a trailing slash is missing or if +there is one extra. Of course it only does so, if the new path has a handler. +If you don't like it, you can turn off this behavior. + +**Path auto-correction:** Besides detecting the missing or additional trailing +slash at no extra cost, the router can also fix wrong cases and remove +superfluous path elements (like `../` or `//`). +Is [CAPTAIN CAPS LOCK](http://www.urbandictionary.com/define.php?term=Captain+Caps+Lock) one of your users? +HttpRouter can help him by making a case-insensitive look-up and redirecting him +to the correct URL. + +**No more server crashes:** You can set a PanicHandler to deal with panics +occurring during handling a HTTP request. The router then recovers and lets the +PanicHandler log what happened and deliver a nice error page. + +Of course you can also set a **custom NotFound handler** and **serve static files**. + +## Usage +This is just a quick introduction, view the [GoDoc](http://godoc.org/github.com/julienschmidt/httprouter) for details. 
+ +Let's start with a trivial example: +```go +package main + +import ( + "fmt" + "github.com/julienschmidt/httprouter" + "net/http" + "log" +) + +func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + fmt.Fprint(w, "Welcome!\n") +} + +func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name")) +} + +func main() { + router := httprouter.New() + router.GET("/", Index) + router.GET("/hello/:name", Hello) + + log.Fatal(http.ListenAndServe(":8080", router)) +} +``` + +### Named parameters +As you can see, `:name` is a *named parameter*. +The values are accessible via `httprouter.Params`, which is just a slice of `httprouter.Param`s. +You can get the value of a parameter either by its index in the slice, or by using the `ByName(name)` method: +`:name` can be retrived by `ByName("name")`. + +Named parameters only match a single path segment: +``` +Pattern: /user/:user + + /user/gordon match + /user/you match + /user/gordon/profile no match + /user/ no match +``` + +**Note:** Since this router has only explicit matches, you can not register static routes and parameters for the same path segment. For example you can not register the patterns `/user/new` and `/user/:user` for the same request method at the same time. The routing of different request methods is independent from each other. + +### Catch-All parameters +The second type are *catch-all* parameters and have the form `*name`. +Like the name suggests, they match everything. +Therefore they must always be at the **end** of the pattern: +``` +Pattern: /src/*filepath + + /src/ match + /src/somefile.go match + /src/subdir/somefile.go match +``` + +## How does it work? +The router relies on a tree structure which makes heavy use of *common prefixes*, +it is basically a *compact* [*prefix tree*](http://en.wikipedia.org/wiki/Trie) +(or just [*Radix tree*](http://en.wikipedia.org/wiki/Radix_tree)). +Nodes with a common prefix also share a common parent. Here is a short example +what the routing tree for the `GET` request method could look like: + +``` +Priority Path Handle +9 \ *<1> +3 ├s nil +2 |├earch\ *<2> +1 |└upport\ *<3> +2 ├blog\ *<4> +1 | └:post nil +1 | └\ *<5> +2 ├about-us\ *<6> +1 | └team\ *<7> +1 └contact\ *<8> +``` +Every `*` represents the memory address of a handler function (a pointer). +If you follow a path trough the tree from the root to the leaf, you get the +complete route path, e.g `\blog\:post\`, where `:post` is just a placeholder +([*parameter*](#named-parameters)) for an actual post name. Unlike hash-maps, a +tree structure also allows us to use dynamic parts like the `:post` parameter, +since we actually match against the routing patterns instead of just comparing +hashes. [As benchmarks show](https://github.com/julienschmidt/go-http-routing-benchmark), +this works very well and efficient. + +Since URL paths have a hierarchical structure and make use only of a limited set +of characters (byte values), it is very likely that there are a lot of common +prefixes. This allows us to easily reduce the routing into ever smaller problems. +Moreover the router manages a separate tree for every request method. +For one thing it is more space efficient than holding a method->handle map in +every single node, for another thing is also allows us to greatly reduce the +routing problem before even starting the look-up in the prefix-tree. 
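+
+To make the example tree above concrete, here is a small, self-contained
+program (an editor's illustration, not part of the upstream README; the handler
+bodies are just placeholders) that registers GET routes like those in the
+sketch:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+	"net/http"
+
+	"github.com/julienschmidt/httprouter"
+)
+
+// h returns a trivial handler so each route in the sketch can be registered.
+func h(msg string) httprouter.Handle {
+	return func(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+		// ps.ByName("post") is non-empty only for the /blog/:post/ route.
+		fmt.Fprintln(w, msg, ps.ByName("post"))
+	}
+}
+
+func main() {
+	router := httprouter.New()
+	// Each request method gets its own tree; these registrations all go into
+	// the GET tree and produce a structure shaped like the sketch above.
+	router.GET("/", h("index"))
+	router.GET("/search/", h("search"))
+	router.GET("/support/", h("support"))
+	router.GET("/blog/:post/", h("blog post"))
+	router.GET("/about-us/", h("about us"))
+	router.GET("/about-us/team/", h("team"))
+	router.GET("/contact/", h("contact"))
+	log.Fatal(http.ListenAndServe(":8080", router))
+}
+```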
+ +For even better scalability, the child nodes on each tree level are ordered by +priority, where the priority is just the number of handles registered in sub +nodes (children, grandchildren, and so on..). +This helps in two ways: + +1. Nodes which are part of the most routing paths are evaluated first. This +helps to make as much routes as possible to be reachable as fast as possible. +2. It is some sort of cost compensation. The longest reachable path (highest +cost) can always be evaluated first. The following scheme visualizes the tree +structure. Nodes are evaluated from top to bottom and from left to right. + +``` +├------------ +├--------- +├----- +├---- +├-- +├-- +└- +``` + + +## Why doesn't this work with http.Handler? +**It does!** The router itself implements the http.Handler interface. +Moreover the router provides convenient [adapters for http.Handler](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handler)s and [http.HandlerFunc](http://godoc.org/github.com/julienschmidt/httprouter#Router.HandlerFunc)s +which allows them to be used as a [httprouter.Handle](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) when registering a route. +The only disadvantage is, that no parameter values can be retrieved when a +http.Handler or http.HandlerFunc is used, since there is no efficient way to +pass the values with the existing function parameters. +Therefore [httprouter.Handle](http://godoc.org/github.com/julienschmidt/httprouter#Router.Handle) has a third function parameter. + +Just try it out for yourself, the usage of HttpRouter is very straightforward. The package is compact and minimalistic, but also probably one of the easiest routers to set up. + + +## Where can I find Middleware *X*? +This package just provides a very efficient request router with a few extra +features. The router is just a [http.Handler](http://golang.org/pkg/net/http/#Handler), +you can chain any http.Handler compatible middleware before the router, +for example the [Gorilla handlers](http://www.gorillatoolkit.org/pkg/handlers). +Or you could [just write your own](http://justinas.org/writing-http-middleware-in-go/), +it's very easy! + +Alternatively, you could try [a framework building upon HttpRouter](#web-frameworks-building-upon-httprouter). + +Here is a quick example: Does your server serve multiple domains / hosts? +You want to use sub-domains? +Define a router per host! +```go +// We need an object that implements the http.Handler interface. +// Therefore we need a type for which we implement the ServeHTTP method. +// We just use a map here, in which we map host names (with port) to http.Handlers +type HostSwitch map[string]http.Handler + +// Implement the ServerHTTP method on our new type +func (hs HostSwitch) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Check if a http.Handler is registered for the given host. + // If yes, use it to handle the request. + if handler := hs[r.Host]; handler != nil { + handler.ServeHTTP(w, r) + } else { + // Handle host names for wich no handler is registered + http.Error(w, "Forbidden", 403) // Or Redirect? 
+ } +} + +func main() { + // Initialize a router as usual + router := httprouter.New() + router.GET("/", Index) + router.GET("/hello/:name", Hello) + + // Make a new HostSwitch and insert the router (our http handler) + // for example.com and port 12345 + hs := make(HostSwitch) + hs["example.com:12345"] = router + + // Use the HostSwitch to listen and serve on port 12345 + log.Fatal(http.ListenAndServe(":12345", hs)) +} +``` + +## Web Frameworks building upon HttpRouter +If the HttpRouter is a bit too minimalistic for you, you might try one of the following more high-level 3rd-party web frameworks building upon the HttpRouter package: +* [Gin](https://github.com/gin-gonic/gin): Features a martini-like API with much better performance +* [Hikaru](https://github.com/najeira/hikaru): Supports standalone and Google AppEngine diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path.go new file mode 100644 index 0000000..486134d --- /dev/null +++ b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path.go @@ -0,0 +1,123 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package httprouter + +// CleanPath is the URL version of path.Clean, it returns a canonical URL path +// for p, eliminating . and .. elements. +// +// The following rules are applied iteratively until no further processing can +// be done: +// 1. Replace multiple slashes with a single slash. +// 2. Eliminate each . path name element (the current directory). +// 3. Eliminate each inner .. path name element (the parent directory) +// along with the non-.. element that precedes it. +// 4. Eliminate .. elements that begin a rooted path: +// that is, replace "/.." by "/" at the beginning of a path. +// +// If the result of this process is an empty string, "/" is returned +func CleanPath(p string) string { + // Turn empty string into "/" + if p == "" { + return "/" + } + + n := len(p) + var buf []byte + + // Invariants: + // reading from path; r is index of next byte to process. + // writing to buf; w is index of next byte to write. + + // path must start with '/' + r := 1 + w := 1 + + if p[0] != '/' { + r = 0 + buf = make([]byte, n+1) + buf[0] = '/' + } + + trailing := n > 2 && p[n-1] == '/' + + // A bit more clunky without a 'lazybuf' like the path package, but the loop + // gets completely inlined (bufApp). So in contrast to the path package this + // loop has no expensive function calls (except 1x make) + + for r < n { + switch { + case p[r] == '/': + // empty path element, trailing slash is added after the end + r++ + + case p[r] == '.' && r+1 == n: + trailing = true + r++ + + case p[r] == '.' && p[r+1] == '/': + // . element + r++ + + case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): + // .. element: remove to last / + r += 2 + + if w > 1 { + // can backtrack + w-- + + if buf == nil { + for w > 1 && p[w] != '/' { + w-- + } + } else { + for w > 1 && buf[w] != '/' { + w-- + } + } + } + + default: + // real path element. 
+ // add slash if needed + if w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + // copy element + for r < n && p[r] != '/' { + bufApp(&buf, p, w, p[r]) + w++ + r++ + } + } + } + + // re-append trailing slash + if trailing && w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + if buf == nil { + return p[:w] + } + return string(buf[:w]) +} + +// internal helper to lazily create a buffer if necessary +func bufApp(buf *[]byte, s string, w int, c byte) { + if *buf == nil { + if s[w] == c { + return + } + + *buf = make([]byte, len(s)) + copy(*buf, s[:w]) + } + (*buf)[w] = c +} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path_test.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path_test.go new file mode 100644 index 0000000..c4ceda5 --- /dev/null +++ b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/path_test.go @@ -0,0 +1,92 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package httprouter + +import ( + "runtime" + "testing" +) + +var cleanTests = []struct { + path, result string +}{ + // Already clean + {"/", "/"}, + {"/abc", "/abc"}, + {"/a/b/c", "/a/b/c"}, + {"/abc/", "/abc/"}, + {"/a/b/c/", "/a/b/c/"}, + + // missing root + {"", "/"}, + {"abc", "/abc"}, + {"abc/def", "/abc/def"}, + {"a/b/c", "/a/b/c"}, + + // Remove doubled slash + {"//", "/"}, + {"/abc//", "/abc/"}, + {"/abc/def//", "/abc/def/"}, + {"/a/b/c//", "/a/b/c/"}, + {"/abc//def//ghi", "/abc/def/ghi"}, + {"//abc", "/abc"}, + {"///abc", "/abc"}, + {"//abc//", "/abc/"}, + + // Remove . elements + {".", "/"}, + {"./", "/"}, + {"/abc/./def", "/abc/def"}, + {"/./abc/def", "/abc/def"}, + {"/abc/.", "/abc/"}, + + // Remove .. elements + {"..", "/"}, + {"../", "/"}, + {"../../", "/"}, + {"../..", "/"}, + {"../../abc", "/abc"}, + {"/abc/def/ghi/../jkl", "/abc/def/jkl"}, + {"/abc/def/../ghi/../jkl", "/abc/jkl"}, + {"/abc/def/..", "/abc"}, + {"/abc/def/../..", "/"}, + {"/abc/def/../../..", "/"}, + {"/abc/def/../../..", "/"}, + {"/abc/def/../../../ghi/jkl/../../../mno", "/mno"}, + + // Combinations + {"abc/./../def", "/def"}, + {"abc//./../def", "/def"}, + {"abc/../../././../def", "/def"}, +} + +func TestPathClean(t *testing.T) { + for _, test := range cleanTests { + if s := CleanPath(test.path); s != test.result { + t.Errorf("CleanPath(%q) = %q, want %q", test.path, s, test.result) + } + if s := CleanPath(test.result); s != test.result { + t.Errorf("CleanPath(%q) = %q, want %q", test.result, s, test.result) + } + } +} + +func TestPathCleanMallocs(t *testing.T) { + if testing.Short() { + t.Skip("skipping malloc count in short mode") + } + if runtime.GOMAXPROCS(0) > 1 { + t.Log("skipping AllocsPerRun checks; GOMAXPROCS>1") + return + } + + for _, test := range cleanTests { + allocs := testing.AllocsPerRun(100, func() { CleanPath(test.result) }) + if allocs > 0 { + t.Errorf("CleanPath(%q): %v allocs, want zero", test.result, allocs) + } + } +} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router.go new file mode 100644 index 0000000..3f3d163 --- /dev/null +++ b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router.go @@ -0,0 +1,317 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +// Package httprouter is a trie based high performance HTTP request router. +// +// A trivial example is: +// +// package main +// +// import ( +// "fmt" +// "github.com/julienschmidt/httprouter" +// "net/http" +// "log" +// ) +// +// func Index(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { +// fmt.Fprint(w, "Welcome!\n") +// } +// +// func Hello(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { +// fmt.Fprintf(w, "hello, %s!\n", ps.ByName("name")) +// } +// +// func main() { +// router := httprouter.New() +// router.GET("/", Index) +// router.GET("/hello/:name", Hello) +// +// log.Fatal(http.ListenAndServe(":8080", router)) +// } +// +// The router matches incoming requests by the request method and the path. +// If a handle is registered for this path and method, the router delegates the +// request to that function. +// For the methods GET, POST, PUT, PATCH and DELETE shortcut functions exist to +// register handles, for all other methods router.Handle can be used. +// +// The registered path, against which the router matches incoming requests, can +// contain two types of parameters: +// Syntax Type +// :name named parameter +// *name catch-all parameter +// +// Named parameters are dynamic path segments. They match anything until the +// next '/' or the path end: +// Path: /blog/:category/:post +// +// Requests: +// /blog/go/request-routers match: category="go", post="request-routers" +// /blog/go/request-routers/ no match, but the router would redirect +// /blog/go/ no match +// /blog/go/request-routers/comments no match +// +// Catch-all parameters match anything until the path end, including the +// directory index (the '/' before the catch-all). Since they match anything +// until the end, catch-all paramerters must always be the final path element. +// Path: /files/*filepath +// +// Requests: +// /files/ match: filepath="/" +// /files/LICENSE match: filepath="/LICENSE" +// /files/templates/article.html match: filepath="/templates/article.html" +// /files no match, but the router would redirect +// +// The value of parameters is saved as a slice of the Param struct, consisting +// each of a key and a value. The slice is passed to the Handle func as a third +// parameter. +// There are two ways to retrieve the value of a parameter: +// // by the name of the parameter +// user := ps.ByName("user") // defined by :user or *user +// +// // by the index of the parameter. This way you can also get the name (key) +// thirdKey := ps[2].Key // the name of the 3rd parameter +// thirdValue := ps[2].Value // the value of the 3rd parameter +package httprouter + +import ( + "net/http" +) + +// Handle is a function that can be registered to a route to handle HTTP +// requests. Like http.HandlerFunc, but has a third parameter for the values of +// wildcards (variables). +type Handle func(http.ResponseWriter, *http.Request, Params) + +// Param is a single URL parameter, consisting of a key and a value. +type Param struct { + Key string + Value string +} + +// Params is a Param-slice, as returned by the router. +// The slice is ordered, the first URL parameter is also the first slice value. +// It is therefore safe to read values by the index. +type Params []Param + +// ByName returns the value of the first Param which key matches the given name. +// If no matching Param is found, an empty string is returned. 
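+// An illustrative example (editor's note, not from the upstream docs): for a
+// route registered as /user/:name and a request for /user/gopher,
+//   ps := Params{Param{Key: "name", Value: "gopher"}}
+//   ps.ByName("name") // "gopher"
+//   ps.ByName("age")  // "" (no such parameter)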
+func (ps Params) ByName(name string) string { + for i := range ps { + if ps[i].Key == name { + return ps[i].Value + } + } + return "" +} + +// Router is a http.Handler which can be used to dispatch requests to different +// handler functions via configurable routes +type Router struct { + trees map[string]*node + + // Enables automatic redirection if the current route can't be matched but a + // handler for the path with (without) the trailing slash exists. + // For example if /foo/ is requested but a route only exists for /foo, the + // client is redirected to /foo with http status code 301 for GET requests + // and 307 for all other request methods. + RedirectTrailingSlash bool + + // If enabled, the router tries to fix the current request path, if no + // handle is registered for it. + // First superfluous path elements like ../ or // are removed. + // Afterwards the router does a case-insensitive lookup of the cleaned path. + // If a handle can be found for this route, the router makes a redirection + // to the corrected path with status code 301 for GET requests and 307 for + // all other request methods. + // For example /FOO and /..//Foo could be redirected to /foo. + // RedirectTrailingSlash is independent of this option. + RedirectFixedPath bool + + // Configurable http.HandlerFunc which is called when no matching route is + // found. If it is not set, http.NotFound is used. + NotFound http.HandlerFunc + + // Function to handle panics recovered from http handlers. + // It should be used to generate a error page and return the http error code + // 500 (Internal Server Error). + // The handler can be used to keep your server from crashing because of + // unrecovered panics. + PanicHandler func(http.ResponseWriter, *http.Request, interface{}) +} + +// Make sure the Router conforms with the http.Handler interface +var _ http.Handler = New() + +// New returns a new initialized Router. +// Path auto-correction, including trailing slashes, is enabled by default. +func New() *Router { + return &Router{ + RedirectTrailingSlash: true, + RedirectFixedPath: true, + } +} + +// GET is a shortcut for router.Handle("GET", path, handle) +func (r *Router) GET(path string, handle Handle) { + r.Handle("GET", path, handle) +} + +// POST is a shortcut for router.Handle("POST", path, handle) +func (r *Router) POST(path string, handle Handle) { + r.Handle("POST", path, handle) +} + +// PUT is a shortcut for router.Handle("PUT", path, handle) +func (r *Router) PUT(path string, handle Handle) { + r.Handle("PUT", path, handle) +} + +// PATCH is a shortcut for router.Handle("PATCH", path, handle) +func (r *Router) PATCH(path string, handle Handle) { + r.Handle("PATCH", path, handle) +} + +// DELETE is a shortcut for router.Handle("DELETE", path, handle) +func (r *Router) DELETE(path string, handle Handle) { + r.Handle("DELETE", path, handle) +} + +// Handle registers a new request handle with the given path and method. +// +// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut +// functions can be used. +// +// This function is intended for bulk loading and to allow the usage of less +// frequently used, non-standardized or custom methods (e.g. for internal +// communication with a proxy). 
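+// An illustrative example (editor's note): registering a handle for a custom
+// or less common method uses the same signature as the shortcuts above, e.g.
+//   router.Handle("OPTIONS", "/resource/:id", optionsHandle)
+// where optionsHandle is any value of type Handle.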
+func (r *Router) Handle(method, path string, handle Handle) { + if path[0] != '/' { + panic("path must begin with '/'") + } + + if r.trees == nil { + r.trees = make(map[string]*node) + } + + root := r.trees[method] + if root == nil { + root = new(node) + r.trees[method] = root + } + + root.addRoute(path, handle) +} + +// Handler is an adapter which allows the usage of an http.Handler as a +// request handle. +func (r *Router) Handler(method, path string, handler http.Handler) { + r.Handle(method, path, + func(w http.ResponseWriter, req *http.Request, _ Params) { + handler.ServeHTTP(w, req) + }, + ) +} + +// HandlerFunc is an adapter which allows the usage of an http.HandlerFunc as a +// request handle. +func (r *Router) HandlerFunc(method, path string, handler http.HandlerFunc) { + r.Handle(method, path, + func(w http.ResponseWriter, req *http.Request, _ Params) { + handler(w, req) + }, + ) +} + +// ServeFiles serves files from the given file system root. +// The path must end with "/*filepath", files are then served from the local +// path /defined/root/dir/*filepath. +// For example if root is "/etc" and *filepath is "passwd", the local file +// "/etc/passwd" would be served. +// Internally a http.FileServer is used, therefore http.NotFound is used instead +// of the Router's NotFound handler. +// To use the operating system's file system implementation, +// use http.Dir: +// router.ServeFiles("/src/*filepath", http.Dir("/var/www")) +func (r *Router) ServeFiles(path string, root http.FileSystem) { + if len(path) < 10 || path[len(path)-10:] != "/*filepath" { + panic("path must end with /*filepath") + } + + fileServer := http.FileServer(root) + + r.GET(path, func(w http.ResponseWriter, req *http.Request, ps Params) { + req.URL.Path = ps.ByName("filepath") + fileServer.ServeHTTP(w, req) + }) +} + +func (r *Router) recv(w http.ResponseWriter, req *http.Request) { + if rcv := recover(); rcv != nil { + r.PanicHandler(w, req, rcv) + } +} + +// Lookup allows the manual lookup of a method + path combo. +// This is e.g. useful to build a framework around this router. +func (r *Router) Lookup(method, path string) (Handle, Params, bool) { + if root := r.trees[method]; root != nil { + return root.getValue(path) + } + return nil, nil, false +} + +// ServeHTTP makes the router implement the http.Handler interface. +func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if r.PanicHandler != nil { + defer r.recv(w, req) + } + + if root := r.trees[req.Method]; root != nil { + path := req.URL.Path + + if handle, ps, tsr := root.getValue(path); handle != nil { + handle(w, req, ps) + return + } else if req.Method != "CONNECT" && path != "/" { + code := 301 // Permanent redirect, request with GET method + if req.Method != "GET" { + // Temporary redirect, request with same method + // As of Go 1.3, Go does not support status code 308. 
+ code = 307 + } + + if tsr && r.RedirectTrailingSlash { + if path[len(path)-1] == '/' { + req.URL.Path = path[:len(path)-1] + } else { + req.URL.Path = path + "/" + } + http.Redirect(w, req, req.URL.String(), code) + return + } + + // Try to fix the request path + if r.RedirectFixedPath { + fixedPath, found := root.findCaseInsensitivePath( + CleanPath(path), + r.RedirectTrailingSlash, + ) + if found { + req.URL.Path = string(fixedPath) + http.Redirect(w, req, req.URL.String(), code) + return + } + } + } + } + + // Handle 404 + if r.NotFound != nil { + r.NotFound(w, req) + } else { + http.NotFound(w, req) + } +} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router_test.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router_test.go new file mode 100644 index 0000000..ca59066 --- /dev/null +++ b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/router_test.go @@ -0,0 +1,329 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. + +package httprouter + +import ( + "errors" + "fmt" + "net/http" + "net/http/httptest" + "reflect" + "testing" +) + +type mockResponseWriter struct{} + +func (m *mockResponseWriter) Header() (h http.Header) { + return http.Header{} +} + +func (m *mockResponseWriter) Write(p []byte) (n int, err error) { + return len(p), nil +} + +func (m *mockResponseWriter) WriteString(s string) (n int, err error) { + return len(s), nil +} + +func (m *mockResponseWriter) WriteHeader(int) {} + +func TestParams(t *testing.T) { + ps := Params{ + Param{"param1", "value1"}, + Param{"param2", "value2"}, + Param{"param3", "value3"}, + } + for i := range ps { + if val := ps.ByName(ps[i].Key); val != ps[i].Value { + t.Errorf("Wrong value for %s: Got %s; Want %s", ps[i].Key, val, ps[i].Value) + } + } + if val := ps.ByName("noKey"); val != "" { + t.Errorf("Expected empty string for not found key; got: %s", val) + } +} + +func TestRouter(t *testing.T) { + router := New() + + routed := false + router.Handle("GET", "/user/:name", func(w http.ResponseWriter, r *http.Request, ps Params) { + routed = true + want := Params{Param{"name", "gopher"}} + if !reflect.DeepEqual(ps, want) { + t.Fatalf("wrong wildcard values: want %v, got %v", want, ps) + } + }) + + w := new(mockResponseWriter) + + req, _ := http.NewRequest("GET", "/user/gopher", nil) + router.ServeHTTP(w, req) + + if !routed { + t.Fatal("routing failed") + } +} + +type handlerStruct struct { + handeled *bool +} + +func (h handlerStruct) ServeHTTP(w http.ResponseWriter, r *http.Request) { + *h.handeled = true +} + +func TestRouterAPI(t *testing.T) { + var get, post, put, patch, delete, handler, handlerFunc bool + + httpHandler := handlerStruct{&handler} + + router := New() + router.GET("/GET", func(w http.ResponseWriter, r *http.Request, _ Params) { + get = true + }) + router.POST("/POST", func(w http.ResponseWriter, r *http.Request, _ Params) { + post = true + }) + router.PUT("/PUT", func(w http.ResponseWriter, r *http.Request, _ Params) { + put = true + }) + router.PATCH("/PATCH", func(w http.ResponseWriter, r *http.Request, _ Params) { + patch = true + }) + router.DELETE("/DELETE", func(w http.ResponseWriter, r *http.Request, _ Params) { + delete = true + }) + router.Handler("GET", "/Handler", httpHandler) + router.HandlerFunc("GET", "/HandlerFunc", func(w http.ResponseWriter, r *http.Request) { + handlerFunc = true + }) + + w := new(mockResponseWriter) + + r, _ := 
http.NewRequest("GET", "/GET", nil) + router.ServeHTTP(w, r) + if !get { + t.Error("routing GET failed") + } + + r, _ = http.NewRequest("POST", "/POST", nil) + router.ServeHTTP(w, r) + if !post { + t.Error("routing POST failed") + } + + r, _ = http.NewRequest("PUT", "/PUT", nil) + router.ServeHTTP(w, r) + if !put { + t.Error("routing PUT failed") + } + + r, _ = http.NewRequest("PATCH", "/PATCH", nil) + router.ServeHTTP(w, r) + if !patch { + t.Error("routing PATCH failed") + } + + r, _ = http.NewRequest("DELETE", "/DELETE", nil) + router.ServeHTTP(w, r) + if !delete { + t.Error("routing DELETE failed") + } + + r, _ = http.NewRequest("GET", "/Handler", nil) + router.ServeHTTP(w, r) + if !handler { + t.Error("routing Handler failed") + } + + r, _ = http.NewRequest("GET", "/HandlerFunc", nil) + router.ServeHTTP(w, r) + if !handlerFunc { + t.Error("routing HandlerFunc failed") + } +} + +func TestRouterRoot(t *testing.T) { + router := New() + recv := catchPanic(func() { + router.GET("noSlashRoot", nil) + }) + if recv == nil { + t.Fatal("registering path not beginning with '/' did not panic") + } +} + +func TestRouterNotFound(t *testing.T) { + handlerFunc := func(_ http.ResponseWriter, _ *http.Request, _ Params) {} + + router := New() + router.GET("/path", handlerFunc) + router.GET("/dir/", handlerFunc) + + testRoutes := []struct { + route string + code int + header string + }{ + {"/path/", 301, "map[Location:[/path]]"}, // TSR -/ + {"/dir", 301, "map[Location:[/dir/]]"}, // TSR +/ + {"/PATH", 301, "map[Location:[/path]]"}, // Fixed Case + {"/DIR/", 301, "map[Location:[/dir/]]"}, // Fixed Case + {"/PATH/", 301, "map[Location:[/path]]"}, // Fixed Case -/ + {"/DIR", 301, "map[Location:[/dir/]]"}, // Fixed Case +/ + {"/../path", 301, "map[Location:[/path]]"}, // CleanPath + {"/nope", 404, ""}, // NotFound + } + for _, tr := range testRoutes { + r, _ := http.NewRequest("GET", tr.route, nil) + w := httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == tr.code && (w.Code == 404 || fmt.Sprint(w.Header()) == tr.header)) { + t.Errorf("NotFound handling route %s failed: Code=%d, Header=%v", tr.route, w.Code, w.Header()) + } + } + + // Test custom not found handler + var notFound bool + router.NotFound = func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(404) + notFound = true + } + r, _ := http.NewRequest("GET", "/nope", nil) + w := httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == 404 && notFound == true) { + t.Errorf("Custom NotFound handler failed: Code=%d, Header=%v", w.Code, w.Header()) + } + + // Test other method than GET (want 307 instead of 301) + router.PATCH("/path", handlerFunc) + r, _ = http.NewRequest("PATCH", "/path/", nil) + w = httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == 307 && fmt.Sprint(w.Header()) == "map[Location:[/path]]") { + t.Errorf("Custom NotFound handler failed: Code=%d, Header=%v", w.Code, w.Header()) + } + + // Test special case where no node for the prefix "/" exists + router = New() + router.GET("/a", handlerFunc) + r, _ = http.NewRequest("GET", "/", nil) + w = httptest.NewRecorder() + router.ServeHTTP(w, r) + if !(w.Code == 404) { + t.Errorf("NotFound handling route / failed: Code=%d", w.Code) + } +} + +func TestRouterPanicHandler(t *testing.T) { + router := New() + panicHandled := false + + router.PanicHandler = func(rw http.ResponseWriter, r *http.Request, p interface{}) { + panicHandled = true + } + + router.Handle("PUT", "/user/:name", func(_ http.ResponseWriter, _ *http.Request, _ Params) { + panic("oops!") + 
}) + + w := new(mockResponseWriter) + req, _ := http.NewRequest("PUT", "/user/gopher", nil) + + defer func() { + if rcv := recover(); rcv != nil { + t.Fatal("handling panic failed") + } + }() + + router.ServeHTTP(w, req) + + if !panicHandled { + t.Fatal("simulating failed") + } +} + +func TestRouterLookup(t *testing.T) { + routed := false + wantHandle := func(_ http.ResponseWriter, _ *http.Request, _ Params) { + routed = true + } + wantParams := Params{Param{"name", "gopher"}} + + router := New() + + // try empty router first + handle, _, tsr := router.Lookup("GET", "/nope") + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if tsr { + t.Error("Got wrong TSR recommendation!") + } + + // insert route and try again + router.GET("/user/:name", wantHandle) + + handle, params, tsr := router.Lookup("GET", "/user/gopher") + if handle == nil { + t.Fatal("Got no handle!") + } else { + handle(nil, nil, nil) + if !routed { + t.Fatal("Routing failed!") + } + } + + if !reflect.DeepEqual(params, wantParams) { + t.Fatalf("Wrong parameter values: want %v, got %v", wantParams, params) + } + + handle, _, tsr = router.Lookup("GET", "/user/gopher/") + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if !tsr { + t.Error("Got no TSR recommendation!") + } + + handle, _, tsr = router.Lookup("GET", "/nope") + if handle != nil { + t.Fatalf("Got handle for unregistered pattern: %v", handle) + } + if tsr { + t.Error("Got wrong TSR recommendation!") + } +} + +type mockFileSystem struct { + opened bool +} + +func (mfs *mockFileSystem) Open(name string) (http.File, error) { + mfs.opened = true + return nil, errors.New("this is just a mock") +} + +func TestRouterServeFiles(t *testing.T) { + router := New() + mfs := &mockFileSystem{} + + recv := catchPanic(func() { + router.ServeFiles("/noFilepath", mfs) + }) + if recv == nil { + t.Fatal("registering path not ending with '*filepath' did not panic") + } + + router.ServeFiles("/*filepath", mfs) + w := new(mockResponseWriter) + r, _ := http.NewRequest("GET", "/favicon.ico", nil) + router.ServeHTTP(w, r) + if !mfs.opened { + t.Error("serving file failed") + } +} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree.go new file mode 100644 index 0000000..933b5cb --- /dev/null +++ b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree.go @@ -0,0 +1,534 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
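+
+// Illustrative sketch (not part of the upstream source): routes are stored in
+// a compressed prefix (radix) tree, so registering "/search/" and "/support"
+// keeps the shared prefix only once:
+//
+//	/s
+//	├── earch/   (handle for "/search/")
+//	└── upport   (handle for "/support")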
+ +package httprouter + +import ( + "strings" + "unicode" +) + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +func countParams(path string) uint8 { + var n uint + for i := 0; i < len(path); i++ { + if path[i] != ':' && path[i] != '*' { + continue + } + n++ + } + if n >= 255 { + return 255 + } + return uint8(n) +} + +type nodeType uint8 + +const ( + static nodeType = 0 + param nodeType = 1 + catchAll nodeType = 2 +) + +type node struct { + path string + wildChild bool + nType nodeType + maxParams uint8 + indices []byte + children []*node + handle Handle + priority uint32 +} + +// increments priority of the given child and reorders if necessary +func (n *node) incrementChildPrio(i int) int { + n.children[i].priority++ + prio := n.children[i].priority + + // adjust position (move to front) + for j := i - 1; j >= 0 && n.children[j].priority < prio; j-- { + // swap node positions + tmpN := n.children[j] + n.children[j] = n.children[i] + n.children[i] = tmpN + tmpI := n.indices[j] + n.indices[j] = n.indices[i] + n.indices[i] = tmpI + + i-- + } + return i +} + +// addRoute adds a node with the given handle to the path. +// Not concurrency-safe! +func (n *node) addRoute(path string, handle Handle) { + n.priority++ + numParams := countParams(path) + + // non-empty tree + if len(n.path) > 0 || len(n.children) > 0 { + WALK: + for { + // Update maxParams of the current node + if numParams > n.maxParams { + n.maxParams = numParams + } + + // Find the longest common prefix. + // This also implies that the commom prefix contains no ':' or '*' + // since the existing key can't contain this chars. + i := 0 + for max := min(len(path), len(n.path)); i < max && path[i] == n.path[i]; i++ { + } + + // Split edge + if i < len(n.path) { + child := node{ + path: n.path[i:], + wildChild: n.wildChild, + indices: n.indices, + children: n.children, + handle: n.handle, + priority: n.priority - 1, + } + + // Update maxParams (max of all children) + for i := range child.children { + if child.children[i].maxParams > child.maxParams { + child.maxParams = child.children[i].maxParams + } + } + + n.children = []*node{&child} + n.indices = []byte{n.path[i]} + n.path = path[:i] + n.handle = nil + n.wildChild = false + } + + // Make new node a child of this node + if i < len(path) { + path = path[i:] + + if n.wildChild { + n = n.children[0] + n.priority++ + + // Update maxParams of the child node + if numParams > n.maxParams { + n.maxParams = numParams + } + numParams-- + + // Check if the wildcard matches + if len(path) >= len(n.path) && n.path == path[:len(n.path)] { + // check for longer wildcard, e.g. 
:name and :names + if len(n.path) >= len(path) || path[len(n.path)] == '/' { + continue WALK + } + } + + panic("conflict with wildcard route") + } + + c := path[0] + + // slash after param + if n.nType == param && c == '/' && len(n.children) == 1 { + n = n.children[0] + n.priority++ + continue WALK + } + + // Check if a child with the next path byte exists + for i, index := range n.indices { + if c == index { + i = n.incrementChildPrio(i) + n = n.children[i] + continue WALK + } + } + + // Otherwise insert it + if c != ':' && c != '*' { + n.indices = append(n.indices, c) + child := &node{ + maxParams: numParams, + } + n.children = append(n.children, child) + n.incrementChildPrio(len(n.indices) - 1) + n = child + } + n.insertChild(numParams, path, handle) + return + + } else if i == len(path) { // Make node a (in-path) leaf + if n.handle != nil { + panic("a Handle is already registered for this path") + } + n.handle = handle + } + return + } + } else { // Empty tree + n.insertChild(numParams, path, handle) + } +} + +func (n *node) insertChild(numParams uint8, path string, handle Handle) { + var offset int + + // find prefix until first wildcard (beginning with ':'' or '*'') + for i, max := 0, len(path); numParams > 0; i++ { + c := path[i] + if c != ':' && c != '*' { + continue + } + + // Check if this Node existing children which would be + // unreachable if we insert the wildcard here + if len(n.children) > 0 { + panic("wildcard route conflicts with existing children") + } + + // find wildcard end (either '/' or path end) + end := i + 1 + for end < max && path[end] != '/' { + end++ + } + + if end-i < 2 { + panic("wildcards must be named with a non-empty name") + } + + if c == ':' { // param + // split path at the beginning of the wildcard + if i > 0 { + n.path = path[offset:i] + offset = i + } + + child := &node{ + nType: param, + maxParams: numParams, + } + n.children = []*node{child} + n.wildChild = true + n = child + n.priority++ + numParams-- + + // if the path doesn't end with the wildcard, then there + // will be another non-wildcard subpath starting with '/' + if end < max { + n.path = path[offset:end] + offset = end + + child := &node{ + maxParams: numParams, + priority: 1, + } + n.children = []*node{child} + n = child + } + + } else { // catchAll + if end != max || numParams > 1 { + panic("catch-all routes are only allowed at the end of the path") + } + + if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { + panic("catch-all conflicts with existing handle for the path segment root") + } + + // currently fixed width 1 for '/' + i-- + if path[i] != '/' { + panic("no / before catch-all") + } + + n.path = path[offset:i] + + // first node: catchAll node with empty path + child := &node{ + wildChild: true, + nType: catchAll, + maxParams: 1, + } + n.children = []*node{child} + n.indices = []byte{path[i]} + n = child + n.priority++ + + // second node: node holding the variable + child = &node{ + path: path[i:], + nType: catchAll, + maxParams: 1, + handle: handle, + priority: 1, + } + n.children = []*node{child} + + return + } + } + + // insert remaining path part and handle to the leaf + n.path = path[offset:] + n.handle = handle +} + +// Returns the handle registered with the given path (key). The values of +// wildcards are saved to a map. +// If no handle can be found, a TSR (trailing slash redirect) recommendation is +// made if a handle exists with an extra (without the) trailing slash for the +// given path. 
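+// For example (illustrative): with a handle registered for "/dir/" but not
+// "/dir", getValue("/dir") returns a nil handle and tsr == true, signalling
+// that redirecting the client to "/dir/" would succeed.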
+func (n *node) getValue(path string) (handle Handle, p Params, tsr bool) { +walk: // Outer loop for walking the tree + for { + if len(path) > len(n.path) { + if path[:len(n.path)] == n.path { + path = path[len(n.path):] + // If this node does not have a wildcard (param or catchAll) + // child, we can just look up the next child node and continue + // to walk down the tree + if !n.wildChild { + c := path[0] + for i, index := range n.indices { + if c == index { + n = n.children[i] + continue walk + } + } + + // Nothing found. + // We can recommend to redirect to the same URL without a + // trailing slash if a leaf exists for that path. + tsr = (path == "/" && n.handle != nil) + return + + } + + // handle wildcard child + n = n.children[0] + switch n.nType { + case param: + // find param end (either '/' or path end) + end := 0 + for end < len(path) && path[end] != '/' { + end++ + } + + // save param value + if p == nil { + // lazy allocation + p = make(Params, 0, n.maxParams) + } + i := len(p) + p = p[:i+1] // expand slice within preallocated capacity + p[i].Key = n.path[1:] + p[i].Value = path[:end] + + // we need to go deeper! + if end < len(path) { + if len(n.children) > 0 { + path = path[end:] + n = n.children[0] + continue walk + } + + // ... but we can't + tsr = (len(path) == end+1) + return + } + + if handle = n.handle; handle != nil { + return + } else if len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists for TSR recommendation + n = n.children[0] + tsr = (n.path == "/" && n.handle != nil) + } + + return + + case catchAll: + // save param value + if p == nil { + // lazy allocation + p = make(Params, 0, n.maxParams) + } + i := len(p) + p = p[:i+1] // expand slice within preallocated capacity + p[i].Key = n.path[2:] + p[i].Value = path + + handle = n.handle + return + + default: + panic("Unknown node type") + } + } + } else if path == n.path { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if handle = n.handle; handle != nil { + return + } + + // No handle found. Check if a handle for this path + a + // trailing slash exists for trailing slash recommendation + for i, index := range n.indices { + if index == '/' { + n = n.children[i] + tsr = (n.path == "/" && n.handle != nil) || + (n.nType == catchAll && n.children[0].handle != nil) + return + } + } + + return + } + + // Nothing found. We can recommend to redirect to the same URL with an + // extra trailing slash if a leaf exists for that path + tsr = (path == "/") || + (len(n.path) == len(path)+1 && n.path[len(path)] == '/' && + path == n.path[:len(n.path)-1] && n.handle != nil) + return + } +} + +// Makes a case-insensitive lookup of the given path and tries to find a handler. +// It can optionally also fix trailing slashes. +// It returns the case-corrected path and a bool indicating wether the lookup +// was successful. +func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) (ciPath []byte, found bool) { + ciPath = make([]byte, 0, len(path)+1) // preallocate enough memory + + // Outer loop for walking the tree + for len(path) >= len(n.path) && strings.ToLower(path[:len(n.path)]) == strings.ToLower(n.path) { + path = path[len(n.path):] + ciPath = append(ciPath, n.path...) 
+ + if len(path) > 0 { + // If this node does not have a wildcard (param or catchAll) child, + // we can just look up the next child node and continue to walk down + // the tree + if !n.wildChild { + r := unicode.ToLower(rune(path[0])) + for i, index := range n.indices { + // must use recursive approach since both index and + // ToLower(index) could exist. We must check both. + if r == unicode.ToLower(rune(index)) { + out, found := n.children[i].findCaseInsensitivePath(path, fixTrailingSlash) + if found { + return append(ciPath, out...), true + } + } + } + + // Nothing found. We can recommend to redirect to the same URL + // without a trailing slash if a leaf exists for that path + found = (fixTrailingSlash && path == "/" && n.handle != nil) + return + + } else { + n = n.children[0] + + switch n.nType { + case param: + // find param end (either '/' or path end) + k := 0 + for k < len(path) && path[k] != '/' { + k++ + } + + // add param value to case insensitive path + ciPath = append(ciPath, path[:k]...) + + // we need to go deeper! + if k < len(path) { + if len(n.children) > 0 { + path = path[k:] + n = n.children[0] + continue + } else { // ... but we can't + if fixTrailingSlash && len(path) == k+1 { + return ciPath, true + } + return + } + } + + if n.handle != nil { + return ciPath, true + } else if fixTrailingSlash && len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists + n = n.children[0] + if n.path == "/" && n.handle != nil { + return append(ciPath, '/'), true + } + } + return + + case catchAll: + return append(ciPath, path...), true + + default: + panic("Unknown node type") + } + } + } else { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if n.handle != nil { + return ciPath, true + } + + // No handle found. + // Try to fix the path by adding a trailing slash + if fixTrailingSlash { + for i, index := range n.indices { + if index == '/' { + n = n.children[i] + if (n.path == "/" && n.handle != nil) || + (n.nType == catchAll && n.children[0].handle != nil) { + return append(ciPath, '/'), true + } + return + } + } + } + return + } + } + + // Nothing found. + // Try to fix the path by adding / removing a trailing slash + if fixTrailingSlash { + if path == "/" { + return ciPath, true + } + if len(path)+1 == len(n.path) && n.path[len(path)] == '/' && + strings.ToLower(path) == strings.ToLower(n.path[:len(path)]) && + n.handle != nil { + return append(ciPath, n.path...), true + } + } + return +} diff --git a/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree_test.go b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree_test.go new file mode 100644 index 0000000..cf4d170 --- /dev/null +++ b/Godeps/_workspace/src/github.com/julienschmidt/httprouter/tree_test.go @@ -0,0 +1,559 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package httprouter + +import ( + "fmt" + "net/http" + "reflect" + "strings" + "testing" +) + +func printChildren(n *node, prefix string) { + fmt.Printf(" %02d:%02d %s%s[%d] %v %t %d \r\n", n.priority, n.maxParams, prefix, n.path, len(n.children), n.handle, n.wildChild, n.nType) + for l := len(n.path); l > 0; l-- { + prefix += " " + } + for _, child := range n.children { + printChildren(child, prefix) + } +} + +// Used as a workaround since we can't compare functions or their adresses +var fakeHandlerValue string + +func fakeHandler(val string) Handle { + return func(http.ResponseWriter, *http.Request, Params) { + fakeHandlerValue = val + } +} + +type testRequests []struct { + path string + nilHandler bool + route string + ps Params +} + +func checkRequests(t *testing.T, tree *node, requests testRequests) { + for _, request := range requests { + handler, ps, _ := tree.getValue(request.path) + + if handler == nil { + if !request.nilHandler { + t.Errorf("handle mismatch for route '%s': Expected non-nil handle", request.path) + } + } else if request.nilHandler { + t.Errorf("handle mismatch for route '%s': Expected nil handle", request.path) + } else { + handler(nil, nil, nil) + if fakeHandlerValue != request.route { + t.Errorf("handle mismatch for route '%s': Wrong handle (%s != %s)", request.path, fakeHandlerValue, request.route) + } + } + + if !reflect.DeepEqual(ps, request.ps) { + t.Errorf("Params mismatch for route '%s'", request.path) + } + } +} + +func checkPriorities(t *testing.T, n *node) uint32 { + var prio uint32 + for i := range n.children { + prio += checkPriorities(t, n.children[i]) + } + + if n.handle != nil { + prio++ + } + + if n.priority != prio { + t.Errorf( + "priority mismatch for node '%s': is %d, should be %d", + n.path, n.priority, prio, + ) + } + + return prio +} + +func checkMaxParams(t *testing.T, n *node) uint8 { + var maxParams uint8 + for i := range n.children { + params := checkMaxParams(t, n.children[i]) + if params > maxParams { + maxParams = params + } + } + if n.nType != static && !n.wildChild { + maxParams++ + } + + if n.maxParams != maxParams { + t.Errorf( + "maxParams mismatch for node '%s': is %d, should be %d", + n.path, n.maxParams, maxParams, + ) + } + + return maxParams +} + +func TestCountParams(t *testing.T) { + if countParams("/path/:param1/static/*catch-all") != 2 { + t.Fail() + } + if countParams(strings.Repeat("/:param", 256)) != 255 { + t.Fail() + } +} + +func TestTreeAddAndGet(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/contact", + "/co", + "/c", + "/a", + "/ab", + "/doc/", + "/doc/go_faq.html", + "/doc/go1.html", + } + for _, route := range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/a", false, "/a", nil}, + {"/", true, "", nil}, + {"/hi", false, "/hi", nil}, + {"/contact", false, "/contact", nil}, + {"/co", false, "/co", nil}, + {"/con", true, "", nil}, // key mismatch + {"/cona", true, "", nil}, // key mismatch + {"/no", true, "", nil}, // no matching child + {"/ab", false, "/ab", nil}, + }) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func TestTreeWildcard(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/cmd/:tool/:sub", + "/cmd/:tool/", + "/src/*filepath", + "/search/", + "/search/:query", + "/user_:name", + "/user_:name/about", + "/files/:dir/*filepath", + "/doc/", + "/doc/go_faq.html", + "/doc/go1.html", + "/info/:user/public", + "/info/:user/project/:project", + } + for _, route 
:= range routes { + tree.addRoute(route, fakeHandler(route)) + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/cmd/test/", false, "/cmd/:tool/", Params{Param{"tool", "test"}}}, + {"/cmd/test", true, "", Params{Param{"tool", "test"}}}, + {"/cmd/test/3", false, "/cmd/:tool/:sub", Params{Param{"tool", "test"}, Param{"sub", "3"}}}, + {"/src/", false, "/src/*filepath", Params{Param{"filepath", "/"}}}, + {"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", "/some/file.png"}}}, + {"/search/", false, "/search/", nil}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/search/someth!ng+in+ünìcodé/", true, "", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}}, + {"/user_gopher/about", false, "/user_:name/about", Params{Param{"name", "gopher"}}}, + {"/files/js/inc/framework.js", false, "/files/:dir/*filepath", Params{Param{"dir", "js"}, Param{"filepath", "/inc/framework.js"}}}, + {"/info/gordon/public", false, "/info/:user/public", Params{Param{"user", "gordon"}}}, + {"/info/gordon/project/go", false, "/info/:user/project/:project", Params{Param{"user", "gordon"}, Param{"project", "go"}}}, + }) + + checkPriorities(t, tree) + checkMaxParams(t, tree) +} + +func catchPanic(testFunc func()) (recv interface{}) { + defer func() { + recv = recover() + }() + + testFunc() + return +} + +type testRoute struct { + path string + conflict bool +} + +func testRoutes(t *testing.T, routes []testRoute) { + tree := &node{} + + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route.path, nil) + }) + + if route.conflict { + if recv == nil { + t.Errorf("no panic for conflicting route '%s'", route.path) + } + } else if recv != nil { + t.Errorf("unexpected panic for route '%s': %v", route.path, recv) + } + } + + //printChildren(tree, "") +} + +func TestTreeWildcardConflict(t *testing.T) { + routes := []testRoute{ + {"/cmd/:tool/:sub", false}, + {"/cmd/vet", true}, + {"/src/*filepath", false}, + {"/src/*filepathx", true}, + {"/src/", true}, + {"/src1/", false}, + {"/src1/*filepath", true}, + {"/src2*filepath", true}, + {"/search/:query", false}, + {"/search/invalid", true}, + {"/user_:name", false}, + {"/user_x", true}, + {"/user_:name", false}, + {"/id:id", false}, + {"/id/:id", true}, + } + testRoutes(t, routes) +} + +func TestTreeChildConflict(t *testing.T) { + routes := []testRoute{ + {"/cmd/vet", false}, + {"/cmd/:tool/:sub", true}, + {"/src/AUTHORS", false}, + {"/src/*filepath", true}, + {"/user_x", false}, + {"/user_:name", true}, + {"/id/:id", false}, + {"/id:id", true}, + {"/:id", true}, + {"/*filepath", true}, + } + testRoutes(t, routes) +} + +func TestTreeDupliatePath(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/", + "/doc/", + "/src/*filepath", + "/search/:query", + "/user_:name", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + + // Add again + recv = catchPanic(func() { + tree.addRoute(route, nil) + }) + if recv == nil { + t.Fatalf("no panic while inserting duplicate route '%s", route) + } + } + + //printChildren(tree, "") + + checkRequests(t, tree, testRequests{ + {"/", false, "/", nil}, + {"/doc/", false, "/doc/", nil}, + {"/src/some/file.png", false, "/src/*filepath", Params{Param{"filepath", 
"/some/file.png"}}}, + {"/search/someth!ng+in+ünìcodé", false, "/search/:query", Params{Param{"query", "someth!ng+in+ünìcodé"}}}, + {"/user_gopher", false, "/user_:name", Params{Param{"name", "gopher"}}}, + }) +} + +func TestEmptyWildcardName(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/user:", + "/user:/", + "/cmd/:/", + "/src/*", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, nil) + }) + if recv == nil { + t.Fatalf("no panic while inserting route with empty wildcard name '%s", route) + } + } +} + +func TestTreeCatchAllConflict(t *testing.T) { + routes := []testRoute{ + {"/src/*filepath/x", true}, + {"/src2/", false}, + {"/src2/*filepath/x", true}, + } + testRoutes(t, routes) +} + +func TestTreeCatchAllConflictRoot(t *testing.T) { + routes := []testRoute{ + {"/", false}, + {"/*filepath", true}, + } + testRoutes(t, routes) +} + +/*func TestTreeDuplicateWildcard(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/:id/:name/:id", + } + for _, route := range routes { + ... + } +}*/ + +func TestTreeTrailingSlashRedirect(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/b/", + "/search/:query", + "/cmd/:tool/", + "/src/*filepath", + "/x", + "/x/y", + "/y/", + "/y/z", + "/0/:id", + "/0/:id/1", + "/1/:id/", + "/1/:id/2", + "/aa", + "/a/", + "/doc", + "/doc/go_faq.html", + "/doc/go1.html", + "/no/a", + "/no/b", + "/api/hello/:name", + } + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + } + + //printChildren(tree, "") + + tsrRoutes := [...]string{ + "/hi/", + "/b", + "/search/gopher/", + "/cmd/vet", + "/src", + "/x/", + "/y", + "/0/go/", + "/1/go", + "/a", + "/doc/", + } + for _, route := range tsrRoutes { + handler, _, tsr := tree.getValue(route) + if handler != nil { + t.Fatalf("non-nil handler for TSR route '%s", route) + } else if !tsr { + t.Errorf("expected TSR recommendation for route '%s'", route) + } + } + + noTsrRoutes := [...]string{ + "/", + "/no", + "/no/", + "/_", + "/_/", + "/api/world/abc", + } + for _, route := range noTsrRoutes { + handler, _, tsr := tree.getValue(route) + if handler != nil { + t.Fatalf("non-nil handler for No-TSR route '%s", route) + } else if tsr { + t.Errorf("expected no TSR recommendation for route '%s'", route) + } + } +} + +func TestTreeFindCaseInsensitivePath(t *testing.T) { + tree := &node{} + + routes := [...]string{ + "/hi", + "/b/", + "/ABC/", + "/search/:query", + "/cmd/:tool/", + "/src/*filepath", + "/x", + "/x/y", + "/y/", + "/y/z", + "/0/:id", + "/0/:id/1", + "/1/:id/", + "/1/:id/2", + "/aa", + "/a/", + "/doc", + "/doc/go_faq.html", + "/doc/go1.html", + "/doc/go/away", + "/no/a", + "/no/b", + } + + for _, route := range routes { + recv := catchPanic(func() { + tree.addRoute(route, fakeHandler(route)) + }) + if recv != nil { + t.Fatalf("panic inserting route '%s': %v", route, recv) + } + } + + // Check out == in for all registered routes + // With fixTrailingSlash = true + for _, route := range routes { + out, found := tree.findCaseInsensitivePath(route, true) + if !found { + t.Errorf("Route '%s' not found!", route) + } else if string(out) != route { + t.Errorf("Wrong result for route '%s': %s", route, string(out)) + } + } + // With fixTrailingSlash = false + for _, route := range routes { + out, found := tree.findCaseInsensitivePath(route, false) + if !found { + t.Errorf("Route '%s' not found!", route) + } else 
if string(out) != route { + t.Errorf("Wrong result for route '%s': %s", route, string(out)) + } + } + + tests := []struct { + in string + out string + found bool + slash bool + }{ + {"/HI", "/hi", true, false}, + {"/HI/", "/hi", true, true}, + {"/B", "/b/", true, true}, + {"/B/", "/b/", true, false}, + {"/abc", "/ABC/", true, true}, + {"/abc/", "/ABC/", true, false}, + {"/aBc", "/ABC/", true, true}, + {"/aBc/", "/ABC/", true, false}, + {"/abC", "/ABC/", true, true}, + {"/abC/", "/ABC/", true, false}, + {"/SEARCH/QUERY", "/search/QUERY", true, false}, + {"/SEARCH/QUERY/", "/search/QUERY", true, true}, + {"/CMD/TOOL/", "/cmd/TOOL/", true, false}, + {"/CMD/TOOL", "/cmd/TOOL/", true, true}, + {"/SRC/FILE/PATH", "/src/FILE/PATH", true, false}, + {"/x/Y", "/x/y", true, false}, + {"/x/Y/", "/x/y", true, true}, + {"/X/y", "/x/y", true, false}, + {"/X/y/", "/x/y", true, true}, + {"/X/Y", "/x/y", true, false}, + {"/X/Y/", "/x/y", true, true}, + {"/Y/", "/y/", true, false}, + {"/Y", "/y/", true, true}, + {"/Y/z", "/y/z", true, false}, + {"/Y/z/", "/y/z", true, true}, + {"/Y/Z", "/y/z", true, false}, + {"/Y/Z/", "/y/z", true, true}, + {"/y/Z", "/y/z", true, false}, + {"/y/Z/", "/y/z", true, true}, + {"/Aa", "/aa", true, false}, + {"/Aa/", "/aa", true, true}, + {"/AA", "/aa", true, false}, + {"/AA/", "/aa", true, true}, + {"/aA", "/aa", true, false}, + {"/aA/", "/aa", true, true}, + {"/A/", "/a/", true, false}, + {"/A", "/a/", true, true}, + {"/DOC", "/doc", true, false}, + {"/DOC/", "/doc", true, true}, + {"/NO", "", false, true}, + {"/DOC/GO", "", false, true}, + } + // With fixTrailingSlash = true + for _, test := range tests { + out, found := tree.findCaseInsensitivePath(test.in, true) + if found != test.found || (found && (string(out) != test.out)) { + t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", + test.in, string(out), found, test.out, test.found) + return + } + } + // With fixTrailingSlash = false + for _, test := range tests { + out, found := tree.findCaseInsensitivePath(test.in, false) + if test.slash { + if found { // test needs a trailingSlash fix. It must not be found! + t.Errorf("Found without fixTrailingSlash: %s; got %s", test.in, string(out)) + } + } else { + if found != test.found || (found && (string(out) != test.out)) { + t.Errorf("Wrong result for '%s': got %s, %t; want %s, %t", + test.in, string(out), found, test.out, test.found) + return + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/pushrax/faststats/.travis.yml b/Godeps/_workspace/src/github.com/pushrax/faststats/.travis.yml new file mode 100644 index 0000000..74012eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/faststats/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: 1.3 + +notifications: + email: false diff --git a/Godeps/_workspace/src/github.com/pushrax/faststats/AUTHORS b/Godeps/_workspace/src/github.com/pushrax/faststats/AUTHORS new file mode 100644 index 0000000..0fca9d4 --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/faststats/AUTHORS @@ -0,0 +1,4 @@ +# This is the official list of faststats authors for copyright purposes. + +Justin Li + diff --git a/Godeps/_workspace/src/github.com/pushrax/faststats/LICENSE b/Godeps/_workspace/src/github.com/pushrax/faststats/LICENSE new file mode 100644 index 0000000..667bfb3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/faststats/LICENSE @@ -0,0 +1,24 @@ +faststats is released under a BSD 2-Clause license, reproduced below. + +Copyright (c) 2014, The faststats Authors +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/pushrax/faststats/README.md b/Godeps/_workspace/src/github.com/pushrax/faststats/README.md new file mode 100644 index 0000000..3593582 --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/faststats/README.md @@ -0,0 +1,5 @@ +# faststats [![Build Status](https://api.travis-ci.org/pushrax/faststats.svg?branch=master)](https://travis-ci.org/pushrax/faststats) + +faststats is a Go package for calculating various statistical measures in real time. +It is intended to be used in online networking applications, where significant overhead just for stats collection is undesirable. + diff --git a/Godeps/_workspace/src/github.com/pushrax/faststats/faststats.go b/Godeps/_workspace/src/github.com/pushrax/faststats/faststats.go new file mode 100644 index 0000000..29f928e --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/faststats/faststats.go @@ -0,0 +1,10 @@ +// Copyright 2014 The faststats Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +package faststats + +type Measure interface { + AddSample(sample float64) + Value() float64 +} diff --git a/Godeps/_workspace/src/github.com/pushrax/faststats/json.go b/Godeps/_workspace/src/github.com/pushrax/faststats/json.go new file mode 100644 index 0000000..9fcf0e8 --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/faststats/json.go @@ -0,0 +1,11 @@ +// Copyright 2014 The faststats Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +package faststats + +import "encoding/json" + +func (p *Percentile) MarshalJSON() ([]byte, error) { + return json.Marshal(p.Value()) +} diff --git a/Godeps/_workspace/src/github.com/pushrax/faststats/percentile.go b/Godeps/_workspace/src/github.com/pushrax/faststats/percentile.go new file mode 100644 index 0000000..333d56d --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/faststats/percentile.go @@ -0,0 +1,101 @@ +// Copyright 2014 The faststats Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. 
+ +package faststats + +import ( + "math" + "sort" + "sync/atomic" +) + +// Percentile implements an efficient percentile calculation of +// arbitrary float64 samples. +type Percentile struct { + percentile float64 + + samples int64 + offset int64 + + values []float64 + value uint64 // These bits are really a float64. +} + +// NewPercentile returns a Percentile with a given threshold. +func NewPercentile(percentile float64) *Percentile { + return &Percentile{ + percentile: percentile, + + // 256 samples is fast, and accurate for most distributions. + values: make([]float64, 0, 256), + } +} + +// NewPercentileWithWindow returns a Percentile with a given threshold +// and window size (accuracy). +func NewPercentileWithWindow(percentile float64, sampleWindow int) *Percentile { + return &Percentile{ + percentile: percentile, + values: make([]float64, 0, sampleWindow), + } +} + +// Value returns the current value at the stored percentile. +// It is thread-safe, and may be called concurrently with AddSample. +func (p *Percentile) Value() float64 { + bits := atomic.LoadUint64(&p.value) + return math.Float64frombits(bits) +} + +// AddSample adds a single float64 sample to the data set. +// It is not thread-safe, and must not be called in parallel. +func (p *Percentile) AddSample(sample float64) { + p.samples++ + + if len(p.values) == cap(p.values) { + target := float64(p.samples)*p.percentile - float64(cap(p.values))/2 + offset := round(math.Max(target, 0)) + + if sample > p.values[0] { + if offset > p.offset { + idx := sort.SearchFloat64s(p.values[1:], sample) + copy(p.values, p.values[1:idx+1]) + + p.values[idx] = sample + p.offset++ + } else if sample < p.values[len(p.values)-1] { + idx := sort.SearchFloat64s(p.values, sample) + copy(p.values[idx+1:], p.values[idx:]) + + p.values[idx] = sample + } + } else { + if offset > p.offset { + p.offset++ + } else { + copy(p.values[1:], p.values) + p.values[0] = sample + } + } + } else { + idx := sort.SearchFloat64s(p.values, sample) + p.values = p.values[:len(p.values)+1] + copy(p.values[idx+1:], p.values[idx:]) + p.values[idx] = sample + } + + bits := math.Float64bits(p.values[p.index()]) + atomic.StoreUint64(&p.value, bits) +} + +func (p *Percentile) index() int64 { + idx := round(float64(p.samples)*p.percentile - float64(p.offset)) + last := int64(len(p.values)) - 1 + + if idx > last { + return last + } + + return idx +} diff --git a/Godeps/_workspace/src/github.com/pushrax/faststats/percentile_test.go b/Godeps/_workspace/src/github.com/pushrax/faststats/percentile_test.go new file mode 100644 index 0000000..82bb9cc --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/faststats/percentile_test.go @@ -0,0 +1,77 @@ +// Copyright 2014 The faststats Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. 
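+
+// Illustrative usage (not part of the upstream source): tracking a running
+// 95th percentile of request latencies with the Percentile type:
+//
+//	p := NewPercentile(0.95)
+//	p.AddSample(0.012) // AddSample must not be called concurrently
+//	p.AddSample(0.034)
+//	_ = p.Value() // Value may be read concurrently with AddSample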
+ +package faststats + +import ( + "math/rand" + "sort" + "testing" + "time" +) + +func TestPercentiles(t *testing.T) { + rand.Seed(time.Now().Unix()) + + testPercentile(t, uniform(10000, 1), 0.5) + testPercentile(t, uniform(10000, 1), 0.9) + testPercentile(t, uniform(10000, 10000), 0.5) + testPercentile(t, uniform(10000, 10000), 0.9) +} + +func TestLogNormPercentiles(t *testing.T) { + rand.Seed(time.Now().Unix()) + + testPercentile(t, logNorm(10000, 1), 0.5) + testPercentile(t, logNorm(10000, 1), 0.9) +} + +func testPercentile(t *testing.T, numbers sort.Float64Slice, percentile float64) { + p := NewPercentile(percentile) + + for i := 0; i < len(numbers); i++ { + p.AddSample(numbers[i]) + } + + sort.Sort(numbers) + got := p.Value() + index := round(float64(len(numbers)) * percentile) + + if got != numbers[index] && got != numbers[index-1] && got != numbers[index+1] { + t.Errorf("Percentile incorrect\n actual: %f\nexpected: %f, %f, %f\n", got, numbers[index-1], numbers[index], numbers[index+1]) + } +} + +func BenchmarkPercentiles64(b *testing.B) { + bencharkPercentile(b, uniform(b.N, 1), 64, 0.5) +} + +func BenchmarkPercentiles128(b *testing.B) { + bencharkPercentile(b, uniform(b.N, 1), 128, 0.5) +} + +func BenchmarkPercentiles256(b *testing.B) { + bencharkPercentile(b, uniform(b.N, 1), 256, 0.5) +} + +func BenchmarkPercentiles512(b *testing.B) { + bencharkPercentile(b, uniform(b.N, 1), 512, 0.5) +} + +func BenchmarkLNPercentiles128(b *testing.B) { + bencharkPercentile(b, logNorm(b.N, 1), 128, 0.5) +} + +func BenchmarkLNPercentiles256(b *testing.B) { + bencharkPercentile(b, logNorm(b.N, 1), 258, 0.5) +} + +func bencharkPercentile(b *testing.B, numbers sort.Float64Slice, window int, percentile float64) { + p := NewPercentileWithWindow(percentile, window) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + p.AddSample(numbers[i]) + } +} diff --git a/Godeps/_workspace/src/github.com/pushrax/faststats/util.go b/Godeps/_workspace/src/github.com/pushrax/faststats/util.go new file mode 100644 index 0000000..2073ace --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/faststats/util.go @@ -0,0 +1,40 @@ +// Copyright 2014 The faststats Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +package faststats + +import ( + "math" + "math/rand" +) + +func round(value float64) int64 { + if value < 0.0 { + value -= 0.5 + } else { + value += 0.5 + } + + return int64(value) +} + +func uniform(n int, scale float64) []float64 { + numbers := make([]float64, n) + + for i := 0; i < n; i++ { + numbers[i] = rand.Float64() * scale + } + + return numbers +} + +func logNorm(n int, scale float64) []float64 { + numbers := make([]float64, n) + + for i := 0; i < n; i++ { + numbers[i] = math.Exp(rand.NormFloat64()) * scale + } + + return numbers +} diff --git a/Godeps/_workspace/src/github.com/pushrax/flatjson/.travis.yml b/Godeps/_workspace/src/github.com/pushrax/flatjson/.travis.yml new file mode 100644 index 0000000..74012eb --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/flatjson/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: 1.3 + +notifications: + email: false diff --git a/Godeps/_workspace/src/github.com/pushrax/flatjson/AUTHORS b/Godeps/_workspace/src/github.com/pushrax/flatjson/AUTHORS new file mode 100644 index 0000000..54da53d --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/flatjson/AUTHORS @@ -0,0 +1,4 @@ +# This is the official list of flatjson authors for copyright purposes. 
+ +Justin Li + diff --git a/Godeps/_workspace/src/github.com/pushrax/flatjson/LICENSE b/Godeps/_workspace/src/github.com/pushrax/flatjson/LICENSE new file mode 100644 index 0000000..d0cda40 --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/flatjson/LICENSE @@ -0,0 +1,24 @@ +Chihaya is released under a BSD 2-Clause license, reproduced below. + +Copyright (c) 2014, The Chihaya Authors +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/pushrax/flatjson/README.md b/Godeps/_workspace/src/github.com/pushrax/flatjson/README.md new file mode 100644 index 0000000..08e0126 --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/flatjson/README.md @@ -0,0 +1,68 @@ +# flatjson [![Build Status](https://api.travis-ci.org/pushrax/flatjson.svg?branch=master)](https://travis-ci.org/pushrax/flatjson) + +flatjson is a Go package for collapsing structs into a flat map, which can then be JSON encoded. +The map values are pointers to the original struct fields, so it does not need to be regenerated when the values are updated. 
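+
+A minimal usage sketch (the `Stats` struct below is illustrative, not part of this package):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/pushrax/flatjson"
+)
+
+type Stats struct {
+	Connections struct {
+		Open     int
+		Accepted int
+	}
+}
+
+func main() {
+	s := &Stats{}
+	flat := flatjson.Flatten(s) // keys like "Connections.Open" point at the fields
+
+	s.Connections.Open = 2 // later writes are visible through the map
+	out, _ := json.Marshal(flat)
+	fmt.Println(string(out)) // {"Connections.Accepted":0,"Connections.Open":2}
+}
+```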
+ +Example use case: + +```json +{ + "Connections" { + "Open": 2, + "Accepted": 4 + }, + "ResponseTime": { + "P50": 0.045775, + "P90": 0.074299, + "P95": 0.096207 + }, + "Peers.IPv6": { + "Current": 0, + "Joined": 0, + "Left": 0, + "Reaped": 0, + "Completed": 0, + "Seeds": { + "Current": 0, + "Joined": 0, + "Left": 0, + "Reaped": 0 + } + }, + "Memory": { + "Alloc": 682208, + "TotalAlloc": 1032488, + "Sys": 5441784, + "Lookups": 28, + "Mallocs": 3326, + "Frees": 2567 + } +} +``` + +is instead serialized as: + +```json +{ + "Connections.Accepted": 4, + "Connections.Open": 2, + "Memory.Alloc": 682208, + "Memory.Frees": 2567, + "Memory.Lookups": 281, + "Memory.Mallocs": 3326, + "Memory.Sys": 5441784, + "Memory.TotalAlloc": 1032488, + "Peers.IPv6.Completed": 0, + "Peers.IPv6.Current": 0, + "Peers.IPv6.Joined": 0, + "Peers.IPv6.Left": 0, + "Peers.IPv6.Reaped": 0, + "Peers.IPv6.Seeds.Current": 0, + "Peers.IPv6.Seeds.Joined": 0, + "Peers.IPv6.Seeds.Left": 0, + "Peers.IPv6.Seeds.Reaped": 0, + "ResponseTime.P50": 0.045775, + "ResponseTime.P90": 0.074299, + "ResponseTime.P95": 0.096207 +} +``` diff --git a/Godeps/_workspace/src/github.com/pushrax/flatjson/flatjson.go b/Godeps/_workspace/src/github.com/pushrax/flatjson/flatjson.go new file mode 100644 index 0000000..ebf42a3 --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/flatjson/flatjson.go @@ -0,0 +1,104 @@ +// Copyright 2014 The flatjson Authors. All rights reserved. +// Use of this source code is governed by the BSD 2-Clause license, +// which can be found in the LICENSE file. + +// Package flatjson implements a means of converting a struct into a flattened +// map suitable for JSON encoding. The values in the map are pointers to the +// original struct fields, so the map can be generated once and encoded whenever +// the underlying values change. +package flatjson + +import ( + "reflect" + "strings" +) + +type Map map[string]interface{} + +// Flatten returns the Map representation of val. +func Flatten(val interface{}) Map { + rval := reflect.ValueOf(val) + rval = extractStruct(rval, rval) + + if rval.Kind() != reflect.Struct { + panic("Flatten: must be called with a struct type") + } + + m := Map{} + recursiveFlatten(rval, "", m) + return m +} + +func keyForField(field reflect.StructField, v reflect.Value) (string, bool) { + if tag := field.Tag.Get("json"); tag != "" { + tokens := strings.SplitN(tag, ",", 2) + name := tokens[0] + opts := "" + + if len(tokens) > 1 { + opts = tokens[1] + } + + if name == "-" || strings.Contains(opts, "omitempty") && isEmptyValue(v) { + return "", false + } else if name != "" { + return name, false + } + } + + if field.Anonymous { + return "", true + } + return field.Name, false +} + +func extractStruct(val, fallback reflect.Value) reflect.Value { + switch val.Kind() { + case reflect.Struct: + return val + case reflect.Ptr: + return extractStruct(val.Elem(), fallback) + case reflect.Interface: + return extractStruct(val.Elem(), fallback) + default: + return fallback + } +} + +func recursiveFlatten(val reflect.Value, prefix string, output Map) int { + valType := val.Type() + added := 0 + + for i := 0; i < val.NumField(); i++ { + child := val.Field(i) + childType := valType.Field(i) + childPrefix := prefix + + key, anonymous := keyForField(childType, child) + + if childType.PkgPath != "" || (key == "" && !anonymous) { + continue + } else if !anonymous { + childPrefix = prefix + key + "." 
+ } + + child = extractStruct(child, child) + + if child.Kind() == reflect.Struct { + childAdded := recursiveFlatten(child, childPrefix, output) + if childAdded != 0 { + added += childAdded + continue + } + } + + output[prefix+key] = child.Addr().Interface() + added++ + } + + return added +} + +func isEmptyValue(v reflect.Value) bool { + return v.Interface() == reflect.Zero(v.Type()).Interface() +} diff --git a/Godeps/_workspace/src/github.com/pushrax/flatjson/flatjson_test.go b/Godeps/_workspace/src/github.com/pushrax/flatjson/flatjson_test.go new file mode 100644 index 0000000..3906bdb --- /dev/null +++ b/Godeps/_workspace/src/github.com/pushrax/flatjson/flatjson_test.go @@ -0,0 +1,143 @@ +package flatjson_test + +import ( + "encoding/json" + "reflect" + "testing" + + "github.com/pushrax/flatjson" +) + +type Child struct { + C int `json:"CC"` + D string `json:"CD"` +} + +func TestBasicFlatten(t *testing.T) { + val := &struct { + A int + B string + }{10, "str"} + + expected := flatjson.Map{ + "A": 10.0, // JSON numbers are all float64. + "B": "str", + } + + testFlattening(t, val, expected) +} + +func TestEmbeddedFlatten(t *testing.T) { + val := &struct { + Child // Embedded. + Other Child // Regular child. + A int + }{} + + expected := flatjson.Map{ + "A": 0.0, + "CC": 0.0, + "CD": "", + "Other.CC": 0.0, + "Other.CD": "", + } + + testFlattening(t, val, expected) +} + +func TestIndirection(t *testing.T) { + o2 := &Child{5, "6"} + + val := &struct { + *Child + Other1 interface{} `json:"O1"` + Other2 **Child `json:"O2"` + Other3 *Child `json:",omitempty"` + }{ + Child: &Child{1, "2"}, + Other1: &Child{3, "4"}, + Other2: &o2, + } + + expected := flatjson.Map{ + "CC": 1.0, + "CD": "2", + "O1.CC": 3.0, + "O1.CD": "4", + "O2.CC": 5.0, + "O2.CD": "6", + } + + testFlattening(t, val, expected) +} + +type L3 struct{ A string } +type L2 struct{ L3 } +type L1 struct{ L2 } +type L0 struct{ L1 } + +func TestDeepNesting(t *testing.T) { + val := &L0{} + val.A = "abc" + + expected := flatjson.Map{"A": "abc"} + testFlattening(t, val, expected) +} + +type TL1 struct { + L2 `json:"L2"` +} +type TL0 struct { + TL1 `json:"L1"` +} + +func TestDeepTagNesting(t *testing.T) { + val := &TL0{} + val.A = "abc" + + expected := flatjson.Map{"L1.L2.A": "abc"} + testFlattening(t, val, expected) +} + +func TestValidInputs(t *testing.T) { + val := &struct{ A int }{10} + expected := flatjson.Map{"A": 10.0} + + testFlattening(t, val, expected) + testFlattening(t, &val, expected) +} + +func TestInvalidInputs(t *testing.T) { + testPanic(t, struct{ A int }{}) + testPanic(t, 123) + testPanic(t, "abc") +} + +func testPanic(t *testing.T, val interface{}) { + defer func() { + if recover() == nil { + t.Errorf("Expected panic for input %#v\n", val) + } + }() + + testFlattening(t, val, flatjson.Map{}) +} + +func testFlattening(t *testing.T, val interface{}, expected flatjson.Map) { + flat := flatjson.Flatten(val) + + enc, err := json.Marshal(flat) + if err != nil { + t.Fatal(err) + } + + got := flatjson.Map{} + err = json.Unmarshal(enc, &got) + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got, expected) { + t.Errorf("Unmarshalled to unexpected value:\n got: %#v\nexpected: %#v\n", got, expected) + } +} diff --git a/Godeps/_workspace/src/github.com/stretchr/graceful/.gitignore b/Godeps/_workspace/src/github.com/stretchr/graceful/.gitignore new file mode 100644 index 0000000..8365624 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/graceful/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and 
Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/Godeps/_workspace/src/github.com/stretchr/graceful/LICENSE b/Godeps/_workspace/src/github.com/stretchr/graceful/LICENSE new file mode 100644 index 0000000..abdb204 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/graceful/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Stretchr, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/stretchr/graceful/README.md b/Godeps/_workspace/src/github.com/stretchr/graceful/README.md new file mode 100644 index 0000000..17d9560 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/graceful/README.md @@ -0,0 +1,114 @@ +graceful [![GoDoc](https://godoc.org/github.com/stretchr/graceful?status.png)](http://godoc.org/github.com/stretchr/graceful) [![wercker status](https://app.wercker.com/status/2729ba763abf87695a17547e0f7af4a4/s "wercker status")](https://app.wercker.com/project/bykey/2729ba763abf87695a17547e0f7af4a4) +======== + +Graceful is a Go 1.3+ package enabling graceful shutdown of http.Handler servers. + +## Usage + +Usage of Graceful is simple. Create your http.Handler and pass it to the `Run` function: + +```go +import ( + "github.com/stretchr/graceful" + "net/http" + "fmt" +) + +func main() { + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the home page!") + }) + + graceful.Run(":3001",10*time.Second,mux) +} +``` + +Another example, using [Negroni](https://github.com/codegangsta/negroni), functions in much the same manner: + +```go +package main + +import ( + "github.com/codegangsta/negroni" + "github.com/stretchr/graceful" + "net/http" + "fmt" +) + +func main() { + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the home page!") + }) + + n := negroni.Classic() + n.UseHandler(mux) + //n.Run(":3000") + graceful.Run(":3001",10*time.Second,n) +} +``` + +In addition to Run there are the http.Server counterparts ListenAndServe, ListenAndServeTLS and Serve, which allow you to configure HTTPS, custom timeouts and error handling. +Graceful may also be used by instantiating its Server type directly, which embeds an http.Server: + +```go +mux := // ... 
+ +srv := &graceful.Server{ + Timeout: 10 * time.Second, + + Server: &http.Server{ + Addr: ":1234", + Handler: mux, + }, +} + +srv.ListenAndServe() +``` + +This form allows you to set the ConnState callback, which works in the same way as in http.Server: + +```go +mux := // ... + +srv := &graceful.Server{ + Timeout: 10 * time.Second, + + ConnState: func(conn net.Conn, state http.ConnState) { + // conn has a new state + }, + + Server: &http.Server{ + Addr: ":1234", + Handler: mux, + }, +} + +srv.ListenAndServe() +``` + +## Behaviour + +When Graceful is sent a SIGINT or SIGTERM (possibly from ^C or a kill command), it: + +1. Disables keepalive connections. +2. Closes the listening socket, allowing another process to listen on that port immediately. +3. Starts a timer of `timeout` duration to give active requests a chance to finish. +4. When timeout expires, closes all active connections. +5. Closes the `stopChan`, waking up any blocking goroutines. +6. Returns from the function, allowing the server to terminate. + +## Notes + +If the `timeout` argument to `Run` is 0, the server never times out, allowing all active requests to complete. + +If you wish to stop the server in some way other than an OS signal, you may call the `Stop()` function. +This function stops the server, gracefully, using the new timeout value you provide. The `StopChan()` function +returns a channel on which you can block while waiting for the server to stop. This channel will be closed when +the server is stopped, allowing your execution to proceed. Multiple goroutines can block on this channel at the +same time and all will be signalled when stopping is complete. + +## Contributing + +Before sending a pull request, please open a new issue describing the feature/issue you wish to address so it can be discussed. The subsequent pull request should close that issue. diff --git a/Godeps/_workspace/src/github.com/stretchr/graceful/graceful.go b/Godeps/_workspace/src/github.com/stretchr/graceful/graceful.go new file mode 100644 index 0000000..25ce2a2 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/graceful/graceful.go @@ -0,0 +1,272 @@ +package graceful + +import ( + "crypto/tls" + "log" + "net" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/stretchr/pat/stop" + "code.google.com/p/go.net/netutil" +) + +// Server wraps an http.Server with graceful connection handling. +// It may be used directly in the same way as http.Server, or may +// be constructed with the global functions in this package. +// +// Example: +// srv := &graceful.Server{ +// Timeout: 5 * time.Second, +// Server: &http.Server{Addr: ":1234", Handler: handler}, +// } +// srv.ListenAndServe() +type Server struct { + *http.Server + + // Timeout is the duration to allow outstanding requests to survive + // before forcefully terminating them. + Timeout time.Duration + + // Limit the number of outstanding requests + ListenLimit int + + // ConnState specifies an optional callback function that is + // called when a client connection changes state. This is a proxy + // to the underlying http.Server's ConnState, and the original + // must not be set directly. + ConnState func(net.Conn, http.ConnState) + + // ShutdownInitiated is an optional callback function that is called + // when shutdown is initiated. It can be used to notify the client + // side of long lived connections (e.g. websockets) to reconnect. 
+ ShutdownInitiated func() + + // interrupt signals the listener to stop serving connections, + // and the server to shut down. + interrupt chan os.Signal + + // stopChan is the channel on which callers may block while waiting for + // the server to stop. + stopChan chan stop.Signal + + // stopChanOnce is used to create the stop channel on demand, once, per + // instance. + stopChanOnce sync.Once + + // connections holds all connections managed by graceful + connections map[net.Conn]struct{} +} + +// ensure Server conforms to stop.Stopper +var _ stop.Stopper = (*Server)(nil) + +// Run serves the http.Handler with graceful shutdown enabled. +// +// timeout is the duration to wait until killing active requests and stopping the server. +// If timeout is 0, the server never times out. It waits for all active requests to finish. +func Run(addr string, timeout time.Duration, n http.Handler) { + srv := &Server{ + Timeout: timeout, + Server: &http.Server{Addr: addr, Handler: n}, + } + + if err := srv.ListenAndServe(); err != nil { + if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") { + logger := log.New(os.Stdout, "[graceful] ", 0) + logger.Fatal(err) + } + } +} + +// ListenAndServe is equivalent to http.Server.ListenAndServe with graceful shutdown enabled. +// +// timeout is the duration to wait until killing active requests and stopping the server. +// If timeout is 0, the server never times out. It waits for all active requests to finish. +func ListenAndServe(server *http.Server, timeout time.Duration) error { + srv := &Server{Timeout: timeout, Server: server} + return srv.ListenAndServe() +} + +// ListenAndServe is equivalent to http.Server.ListenAndServe with graceful shutdown enabled. +func (srv *Server) ListenAndServe() error { + // Create the listener so we can control their lifetime + addr := srv.Addr + if addr == "" { + addr = ":http" + } + l, err := net.Listen("tcp", addr) + if err != nil { + return err + } + + if srv.ListenLimit != 0 { + l = netutil.LimitListener(l, srv.ListenLimit) + } + return srv.Serve(l) +} + +// ListenAndServeTLS is equivalent to http.Server.ListenAndServeTLS with graceful shutdown enabled. +// +// timeout is the duration to wait until killing active requests and stopping the server. +// If timeout is 0, the server never times out. It waits for all active requests to finish. +func ListenAndServeTLS(server *http.Server, certFile, keyFile string, timeout time.Duration) error { + // Create the listener ourselves so we can control its lifetime + srv := &Server{Timeout: timeout, Server: server} + addr := srv.Addr + if addr == "" { + addr = ":https" + } + + config := &tls.Config{} + if srv.TLSConfig != nil { + *config = *srv.TLSConfig + } + if config.NextProtos == nil { + config.NextProtos = []string{"http/1.1"} + } + + var err error + config.Certificates = make([]tls.Certificate, 1) + config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return err + } + + conn, err := net.Listen("tcp", addr) + if err != nil { + return err + } + + tlsListener := tls.NewListener(conn, config) + return srv.Serve(tlsListener) +} + +// Serve is equivalent to http.Server.Serve with graceful shutdown enabled. +// +// timeout is the duration to wait until killing active requests and stopping the server. +// If timeout is 0, the server never times out. It waits for all active requests to finish. 
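+//
+// For example (illustrative, assuming a handler named mux):
+//
+//	l, err := net.Listen("tcp", ":8080")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	Serve(&http.Server{Handler: mux}, l, 10*time.Second)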
+func Serve(server *http.Server, l net.Listener, timeout time.Duration) error { + srv := &Server{Timeout: timeout, Server: server} + return srv.Serve(l) +} + +// Serve is equivalent to http.Server.Serve with graceful shutdown enabled. +func (srv *Server) Serve(listener net.Listener) error { + // Track connection state + add := make(chan net.Conn) + remove := make(chan net.Conn) + + srv.Server.ConnState = func(conn net.Conn, state http.ConnState) { + switch state { + case http.StateNew: + add <- conn + case http.StateClosed, http.StateHijacked: + remove <- conn + } + + if srv.ConnState != nil { + srv.ConnState(conn, state) + } + } + + // Manage open connections + shutdown := make(chan chan struct{}) + kill := make(chan struct{}) + go func() { + var done chan struct{} + srv.connections = map[net.Conn]struct{}{} + for { + select { + case conn := <-add: + srv.connections[conn] = struct{}{} + case conn := <-remove: + delete(srv.connections, conn) + if done != nil && len(srv.connections) == 0 { + done <- struct{}{} + return + } + case done = <-shutdown: + if len(srv.connections) == 0 { + done <- struct{}{} + return + } + case <-kill: + for k := range srv.connections { + k.Close() + } + return + } + } + }() + + if srv.interrupt == nil { + srv.interrupt = make(chan os.Signal, 1) + } + + // Set up the interrupt catch + signal.Notify(srv.interrupt, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-srv.interrupt + srv.SetKeepAlivesEnabled(false) + listener.Close() + + if srv.ShutdownInitiated != nil { + srv.ShutdownInitiated() + } + + signal.Stop(srv.interrupt) + close(srv.interrupt) + }() + + // Serve with graceful listener. + // Execution blocks here until listener.Close() is called, above. + err := srv.Server.Serve(listener) + + // Request done notification + done := make(chan struct{}) + shutdown <- done + + if srv.Timeout > 0 { + select { + case <-done: + case <-time.After(srv.Timeout): + close(kill) + } + } else { + <-done + } + // Close the stopChan to wake up any blocked goroutines. + if srv.stopChan != nil { + close(srv.stopChan) + } + return err +} + +// Stop instructs the type to halt operations and close +// the stop channel when it is finished. +// +// timeout is grace period for which to wait before shutting +// down the server. The timeout value passed here will override the +// timeout given when constructing the server, as this is an explicit +// command to stop the server. +func (srv *Server) Stop(timeout time.Duration) { + srv.Timeout = timeout + srv.interrupt <- syscall.SIGINT +} + +// StopChan gets the stop channel which will block until +// stopping has completed, at which point it is closed. +// Callers should never close the stop channel. 
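+//
+// A minimal usage sketch (hypothetical caller; srv is a *Server that is
+// already serving in another goroutine). Obtaining the channel before
+// calling Stop ensures the close is not missed:
+//
+//	done := srv.StopChan()    // obtain the channel before stopping
+//	srv.Stop(5 * time.Second) // request a graceful shutdown
+//	<-done                    // blocks until shutdown has completed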
+func (srv *Server) StopChan() <-chan stop.Signal { + srv.stopChanOnce.Do(func() { + if srv.stopChan == nil { + srv.stopChan = stop.Make() + } + }) + return srv.stopChan +} diff --git a/Godeps/_workspace/src/github.com/stretchr/graceful/graceful_test.go b/Godeps/_workspace/src/github.com/stretchr/graceful/graceful_test.go new file mode 100644 index 0000000..871df34 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/graceful/graceful_test.go @@ -0,0 +1,322 @@ +package graceful + +import ( + "io" + "net" + "net/http" + "net/url" + "os" + "reflect" + "sync" + "syscall" + "testing" + "time" +) + +var killTime = 50 * time.Millisecond + +func runQuery(t *testing.T, expected int, shouldErr bool, wg *sync.WaitGroup) { + wg.Add(1) + defer wg.Done() + client := http.Client{} + r, err := client.Get("http://localhost:3000") + if shouldErr && err == nil { + t.Fatal("Expected an error but none was encountered.") + } else if shouldErr && err != nil { + if err.(*url.Error).Err == io.EOF { + return + } + errno := err.(*url.Error).Err.(*net.OpError).Err.(syscall.Errno) + if errno == syscall.ECONNREFUSED { + return + } else if err != nil { + t.Fatal("Error on Get:", err) + } + } + + if r != nil && r.StatusCode != expected { + t.Fatalf("Incorrect status code on response. Expected %d. Got %d", expected, r.StatusCode) + } else if r == nil { + t.Fatal("No response when a response was expected.") + } +} + +func createListener(sleep time.Duration) (*http.Server, net.Listener, error) { + mux := http.NewServeMux() + mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { + time.Sleep(sleep) + rw.WriteHeader(http.StatusOK) + }) + + server := &http.Server{Addr: ":3000", Handler: mux} + l, err := net.Listen("tcp", ":3000") + return server, l, err +} + +func runServer(timeout, sleep time.Duration, c chan os.Signal) error { + server, l, err := createListener(sleep) + if err != nil { + return err + } + + srv := &Server{Timeout: timeout, Server: server, interrupt: c} + return srv.Serve(l) +} + +func launchTestQueries(t *testing.T, wg *sync.WaitGroup, c chan os.Signal) { + for i := 0; i < 8; i++ { + go runQuery(t, http.StatusOK, false, wg) + } + + time.Sleep(10 * time.Millisecond) + c <- os.Interrupt + time.Sleep(10 * time.Millisecond) + + for i := 0; i < 8; i++ { + go runQuery(t, 0, true, wg) + } + + wg.Done() +} + +func TestGracefulRun(t *testing.T) { + c := make(chan os.Signal, 1) + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + runServer(killTime, killTime/2, c) + wg.Done() + }() + + wg.Add(1) + go launchTestQueries(t, &wg, c) + wg.Wait() +} + +func TestGracefulRunTimesOut(t *testing.T) { + c := make(chan os.Signal, 1) + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + runServer(killTime, killTime*10, c) + wg.Done() + }() + + wg.Add(1) + go func() { + for i := 0; i < 8; i++ { + go runQuery(t, 0, true, &wg) + } + time.Sleep(10 * time.Millisecond) + c <- os.Interrupt + time.Sleep(10 * time.Millisecond) + for i := 0; i < 8; i++ { + go runQuery(t, 0, true, &wg) + } + wg.Done() + }() + + wg.Wait() + +} + +func TestGracefulRunDoesntTimeOut(t *testing.T) { + c := make(chan os.Signal, 1) + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + runServer(0, killTime*2, c) + wg.Done() + }() + + wg.Add(1) + go launchTestQueries(t, &wg, c) + wg.Wait() +} + +func TestGracefulRunNoRequests(t *testing.T) { + c := make(chan os.Signal, 1) + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + runServer(0, killTime*2, c) + wg.Done() + }() + + c <- os.Interrupt + + wg.Wait() + +} + +func 
TestGracefulForwardsConnState(t *testing.T) { + c := make(chan os.Signal, 1) + states := make(map[http.ConnState]int) + + connState := func(conn net.Conn, state http.ConnState) { + states[state]++ + } + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + server, l, _ := createListener(killTime / 2) + srv := &Server{ + ConnState: connState, + Timeout: killTime, + Server: server, + interrupt: c, + } + srv.Serve(l) + + wg.Done() + }() + + wg.Add(1) + go launchTestQueries(t, &wg, c) + wg.Wait() + + expected := map[http.ConnState]int{ + http.StateNew: 8, + http.StateActive: 8, + http.StateClosed: 8, + } + + if !reflect.DeepEqual(states, expected) { + t.Errorf("Incorrect connection state tracking.\n actual: %v\nexpected: %v\n", states, expected) + } +} + +func TestGracefulExplicitStop(t *testing.T) { + server, l, err := createListener(1 * time.Millisecond) + if err != nil { + t.Fatal(err) + } + + srv := &Server{Timeout: killTime, Server: server} + + go func() { + go srv.Serve(l) + time.Sleep(10 * time.Millisecond) + srv.Stop(killTime) + }() + + // block on the stopChan until the server has shut down + select { + case <-srv.StopChan(): + case <-time.After(100 * time.Millisecond): + t.Fatal("Timed out while waiting for explicit stop to complete") + } +} + +func TestGracefulExplicitStopOverride(t *testing.T) { + server, l, err := createListener(1 * time.Millisecond) + if err != nil { + t.Fatal(err) + } + + srv := &Server{Timeout: killTime, Server: server} + + go func() { + go srv.Serve(l) + time.Sleep(10 * time.Millisecond) + srv.Stop(killTime / 2) + }() + + // block on the stopChan until the server has shut down + select { + case <-srv.StopChan(): + case <-time.After(killTime): + t.Fatal("Timed out while waiting for explicit stop to complete") + } +} + +func TestShutdownInitiatedCallback(t *testing.T) { + server, l, err := createListener(1 * time.Millisecond) + if err != nil { + t.Fatal(err) + } + + called := make(chan struct{}) + cb := func() { close(called) } + + srv := &Server{Server: server, ShutdownInitiated: cb} + + go func() { + go srv.Serve(l) + time.Sleep(10 * time.Millisecond) + srv.Stop(killTime) + }() + + select { + case <-called: + case <-time.After(killTime): + t.Fatal("Timed out while waiting for ShutdownInitiated callback to be called") + } +} +func hijackingListener(srv *Server) (*http.Server, net.Listener, error) { + mux := http.NewServeMux() + mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { + conn, bufrw, err := rw.(http.Hijacker).Hijack() + if err != nil { + http.Error(rw, "webserver doesn't support hijacking", http.StatusInternalServerError) + return + } + + defer conn.Close() + + bufrw.WriteString("HTTP/1.1 200 OK\r\n\r\n") + bufrw.Flush() + }) + + server := &http.Server{Addr: ":3000", Handler: mux} + l, err := net.Listen("tcp", ":3000") + return server, l, err +} + +func TestNotifyClosed(t *testing.T) { + c := make(chan os.Signal, 1) + + var wg sync.WaitGroup + wg.Add(1) + + srv := &Server{Timeout: killTime, interrupt: c} + server, l, err := hijackingListener(srv) + if err != nil { + t.Fatal(err) + } + + srv.Server = server + + go func() { + srv.Serve(l) + wg.Done() + }() + + for i := 0; i < 8; i++ { + runQuery(t, http.StatusOK, false, &wg) + } + + if len(srv.connections) > 0 { + t.Fatal("hijacked connections should not be managed") + } + + srv.Stop(0) + + // block on the stopChan until the server has shut down + select { + case <-srv.StopChan(): + case <-time.After(100 * time.Millisecond): + t.Fatal("Timed out while waiting for explicit stop to 
complete") + } + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/graceful/tests/main.go b/Godeps/_workspace/src/github.com/stretchr/graceful/tests/main.go new file mode 100644 index 0000000..f9a6c4a --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/graceful/tests/main.go @@ -0,0 +1,40 @@ +package main + +import ( + "fmt" + "sync" + + "github.com/codegangsta/negroni" + "github.com/stretchr/graceful" +) + +func main() { + + var wg sync.WaitGroup + + wg.Add(3) + go func() { + n := negroni.New() + fmt.Println("Launching server on :3000") + graceful.Run(":3000", 0, n) + fmt.Println("Terminated server on :3000") + wg.Done() + }() + go func() { + n := negroni.New() + fmt.Println("Launching server on :3001") + graceful.Run(":3001", 0, n) + fmt.Println("Terminated server on :3001") + wg.Done() + }() + go func() { + n := negroni.New() + fmt.Println("Launching server on :3002") + graceful.Run(":3002", 0, n) + fmt.Println("Terminated server on :3002") + wg.Done() + }() + fmt.Println("Press ctrl+c. All servers should terminate.") + wg.Wait() + +} diff --git a/Godeps/_workspace/src/github.com/stretchr/graceful/wercker.yml b/Godeps/_workspace/src/github.com/stretchr/graceful/wercker.yml new file mode 100644 index 0000000..41d2c52 --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/graceful/wercker.yml @@ -0,0 +1 @@ +box: wercker/golang diff --git a/Godeps/_workspace/src/github.com/stretchr/pat/stop/doc.go b/Godeps/_workspace/src/github.com/stretchr/pat/stop/doc.go new file mode 100644 index 0000000..6c6ab1b --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/pat/stop/doc.go @@ -0,0 +1,46 @@ +// Package stop represents a pattern for types that need to do some work +// when stopping. The StopChan method returns a <-chan stop.Signal which +// is closed when the operation has completed. +// +// Stopper types when implementing the stop channel pattern should use stop.Make +// to create and store a stop channel, and close the channel once stopping has completed: +// func New() Type { +// t := new(Type) +// t.stopChan = stop.Make() +// return t +// } +// func (t Type) Stop() { +// go func(){ +// // TODO: tear stuff down +// close(t.stopChan) +// }() +// } +// func (t Type) StopChan() <-chan stop.Signal { +// return t.stopChan +// } +// +// Stopper types can be stopped in the following ways: +// // stop and forget +// t.Stop(1 * time.Second) +// +// // stop and wait +// t.Stop(1 * time.Second) +// <-t.StopChan() +// +// // stop, do more work, then wait +// t.Stop(1 * time.Second); +// // do more work +// <-t.StopChan() +// +// // stop and timeout after 1 second +// t.Stop(1 * time.Second) +// select { +// case <-t.StopChan(): +// case <-time.After(1 * time.Second): +// } +// +// // stop.All is the same as calling Stop() then StopChan() so +// // all above patterns also work on many Stopper types, +// // for example; stop and wait for many things: +// <-stop.All(1 * time.Second, t1, t2, t3) +package stop diff --git a/Godeps/_workspace/src/github.com/stretchr/pat/stop/stop.go b/Godeps/_workspace/src/github.com/stretchr/pat/stop/stop.go new file mode 100644 index 0000000..6a7792d --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/pat/stop/stop.go @@ -0,0 +1,57 @@ +package stop + +import "time" + +// Signal is the type that gets sent down the stop channel. +type Signal struct{} + +// NoWait represents a time.Duration with zero value. +// Logically meaning no grace wait period when stopping. 
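+//
+// For example, a hypothetical caller holding some Stopper s could request
+// an immediate stop, with no grace period, via:
+//
+//	s.Stop(stop.NoWait)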
+var NoWait time.Duration + +// Stopper represents types that implement +// the stop channel pattern. +type Stopper interface { + // Stop instructs the type to halt operations and close + // the stop channel when it is finished. + Stop(wait time.Duration) + // StopChan gets the stop channel which will block until + // stopping has completed, at which point it is closed. + // Callers should never close the stop channel. + // The StopChan should exist from the point at which operations + // begun, not the point at which Stop was called. + StopChan() <-chan Signal +} + +// Stopped returns a channel that signals immediately. Useful for +// cases when no tear-down work is required and stopping is +// immediate. +func Stopped() <-chan Signal { + c := Make() + close(c) + return c +} + +// Make makes a new channel used to indicate when +// stopping has finished. Sends to channel will not block. +func Make() chan Signal { + return make(chan Signal, 0) +} + +// All stops all Stopper types and returns another channel +// which will close once all things have finished stopping. +func All(wait time.Duration, stoppers ...Stopper) <-chan Signal { + all := Make() + go func() { + var allChans []<-chan Signal + for _, stopper := range stoppers { + go stopper.Stop(wait) + allChans = append(allChans, stopper.StopChan()) + } + for _, ch := range allChans { + <-ch + } + close(all) + }() + return all +} diff --git a/Godeps/_workspace/src/github.com/stretchr/pat/stop/stop_test.go b/Godeps/_workspace/src/github.com/stretchr/pat/stop/stop_test.go new file mode 100644 index 0000000..d47443f --- /dev/null +++ b/Godeps/_workspace/src/github.com/stretchr/pat/stop/stop_test.go @@ -0,0 +1,76 @@ +package stop_test + +import ( + "testing" + "time" + + "github.com/stretchr/pat/stop" +) + +type testStopper struct { + stopChan chan stop.Signal +} + +func NewTestStopper() *testStopper { + s := new(testStopper) + s.stopChan = stop.Make() + return s +} + +func (t *testStopper) Stop(wait time.Duration) { + go func() { + time.Sleep(100 * time.Millisecond) + close(t.stopChan) + }() +} +func (t *testStopper) StopChan() <-chan stop.Signal { + return t.stopChan +} + +type noopStopper struct{} + +func (t *noopStopper) Stop() { +} +func (t *noopStopper) StopChan() <-chan stop.Signal { + return stop.Stopped() +} + +func TestStop(t *testing.T) { + + s := NewTestStopper() + s.Stop(1 * time.Second) + stopChan := s.StopChan() + select { + case <-stopChan: + case <-time.After(1 * time.Second): + t.Error("Stop signal was never sent (timed out)") + } + +} + +func TestAll(t *testing.T) { + + s1 := NewTestStopper() + s2 := NewTestStopper() + s3 := NewTestStopper() + + select { + case <-stop.All(1*time.Second, s1, s2, s3): + case <-time.After(1 * time.Second): + t.Error("All signal was never sent (timed out)") + } + +} + +func TestNoop(t *testing.T) { + + s := new(noopStopper) + s.Stop() + stopChan := s.StopChan() + select { + case <-stopChan: + case <-time.After(1 * time.Second): + t.Error("Stop signal was never sent (timed out)") + } + +}