From ccfdd1ff63bb9835490beef8ee67ddb1a05b4b60 Mon Sep 17 00:00:00 2001
From: emile
Date: Tue, 22 Sep 2015 11:58:43 +0200
Subject: [PATCH] Removes Godeps/_workspace/src from git

---
 Godeps/_workspace/.gitignore | 1 +
 .../src/github.com/BurntSushi/toml/.gitignore | 5 -
 .../github.com/BurntSushi/toml/.travis.yml | 12 -
 .../src/github.com/BurntSushi/toml/COMPATIBLE | 3 -
 .../src/github.com/BurntSushi/toml/COPYING | 14 -
 .../src/github.com/BurntSushi/toml/Makefile | 19 -
 .../src/github.com/BurntSushi/toml/README.md | 220 --
 .../toml/cmd/toml-test-decoder/COPYING | 14 -
 .../toml/cmd/toml-test-decoder/README.md | 14 -
 .../toml/cmd/toml-test-decoder/main.go | 90 -
 .../toml/cmd/toml-test-encoder/COPYING | 14 -
 .../toml/cmd/toml-test-encoder/README.md | 14 -
 .../toml/cmd/toml-test-encoder/main.go | 131 --
 .../BurntSushi/toml/cmd/tomlv/COPYING | 14 -
 .../BurntSushi/toml/cmd/tomlv/README.md | 22 -
 .../BurntSushi/toml/cmd/tomlv/main.go | 61 -
 .../src/github.com/BurntSushi/toml/decode.go | 472 ----
 .../github.com/BurntSushi/toml/decode_meta.go | 99 -
 .../github.com/BurntSushi/toml/decode_test.go | 540 -----
 .../src/github.com/BurntSushi/toml/doc.go | 27 -
 .../src/github.com/BurntSushi/toml/encode.go | 521 -----
 .../github.com/BurntSushi/toml/encode_test.go | 506 -----
 .../BurntSushi/toml/encoding_types.go | 19 -
 .../BurntSushi/toml/encoding_types_1.1.go | 18 -
 .../src/github.com/BurntSushi/toml/lex.go | 725 ------
 .../src/github.com/BurntSushi/toml/parse.go | 417 ----
 .../github.com/BurntSushi/toml/session.vim | 1 -
 .../github.com/BurntSushi/toml/type_check.go | 85 -
 .../github.com/BurntSushi/toml/type_fields.go | 241 --
 .../src/github.com/BurntSushi/ty/.gitignore | 3 -
 .../src/github.com/BurntSushi/ty/COPYING | 13 -
 .../src/github.com/BurntSushi/ty/Makefile | 28 -
 .../src/github.com/BurntSushi/ty/README.md | 119 -
 .../src/github.com/BurntSushi/ty/data/doc.go | 82 -
 .../github.com/BurntSushi/ty/data/ordmap.go | 175 --
 .../BurntSushi/ty/data/ordmap_test.go | 62 -
 .../src/github.com/BurntSushi/ty/doc.go | 22 -
 .../src/github.com/BurntSushi/ty/fun/chan.go | 84 -
 .../github.com/BurntSushi/ty/fun/chan_test.go | 23 -
 .../src/github.com/BurntSushi/ty/fun/doc.go | 118 -
 .../github.com/BurntSushi/ty/fun/fun_test.go | 51 -
 .../src/github.com/BurntSushi/ty/fun/func.go | 35 -
 .../github.com/BurntSushi/ty/fun/func_test.go | 111 -
 .../src/github.com/BurntSushi/ty/fun/list.go | 303 ---
 .../BurntSushi/ty/fun/list_parmap_test.go | 148 --
 .../github.com/BurntSushi/ty/fun/list_test.go | 149 --
 .../src/github.com/BurntSushi/ty/fun/map.go | 46 -
 .../github.com/BurntSushi/ty/fun/map_test.go | 25 -
 .../src/github.com/BurntSushi/ty/fun/rand.go | 94 -
 .../github.com/BurntSushi/ty/fun/rand_test.go | 100 -
 .../src/github.com/BurntSushi/ty/fun/set.go | 99 -
 .../github.com/BurntSushi/ty/fun/set_test.go | 84 -
 .../src/github.com/BurntSushi/ty/fun/sort.go | 98 -
 .../github.com/BurntSushi/ty/fun/sort_test.go | 120 -
 .../src/github.com/BurntSushi/ty/fun/util.go | 37 -
 .../ty/perf/builtin-vs-reflect.bench | 35 -
 .../src/github.com/BurntSushi/ty/session.vim | 1 -
 .../github.com/BurntSushi/ty/type-check.go | 338 ---
 .../src/github.com/BurntSushi/ty/tyvars.go | 28 -
 .../github.com/alecthomas/template/README.md | 25 -
 .../src/github.com/alecthomas/template/doc.go | 406 ----
 .../alecthomas/template/example_test.go | 71 -
 .../alecthomas/template/examplefiles_test.go | 182 --
 .../alecthomas/template/examplefunc_test.go | 54 -
 .../github.com/alecthomas/template/exec.go | 844 -------
 .../alecthomas/template/exec_test.go | 1044 ---------
.../github.com/alecthomas/template/funcs.go | 598 ----- .../github.com/alecthomas/template/helper.go | 108 - .../alecthomas/template/multi_test.go | 292 --- .../alecthomas/template/parse/lex.go | 556 ----- .../alecthomas/template/parse/lex_test.go | 468 ---- .../alecthomas/template/parse/node.go | 834 ------- .../alecthomas/template/parse/parse.go | 700 ------ .../alecthomas/template/parse/parse_test.go | 426 ---- .../alecthomas/template/template.go | 217 -- .../alecthomas/template/testdata/file1.tmpl | 2 - .../alecthomas/template/testdata/file2.tmpl | 2 - .../alecthomas/template/testdata/tmpl1.tmpl | 3 - .../alecthomas/template/testdata/tmpl2.tmpl | 3 - .../src/github.com/alecthomas/units/COPYING | 19 - .../src/github.com/alecthomas/units/README.md | 11 - .../src/github.com/alecthomas/units/bytes.go | 83 - .../github.com/alecthomas/units/bytes_test.go | 49 - .../src/github.com/alecthomas/units/doc.go | 13 - .../src/github.com/alecthomas/units/si.go | 26 - .../src/github.com/alecthomas/units/util.go | 138 -- .../github.com/cenkalti/backoff/.gitignore | 22 - .../github.com/cenkalti/backoff/.travis.yml | 2 - .../src/github.com/cenkalti/backoff/LICENSE | 20 - .../src/github.com/cenkalti/backoff/README.md | 116 - .../cenkalti/backoff/adv_example_test.go | 117 - .../github.com/cenkalti/backoff/backoff.go | 59 - .../cenkalti/backoff/backoff_test.go | 27 - .../cenkalti/backoff/example_test.go | 51 - .../cenkalti/backoff/exponential.go | 151 -- .../cenkalti/backoff/exponential_test.go | 108 - .../src/github.com/cenkalti/backoff/retry.go | 46 - .../github.com/cenkalti/backoff/retry_test.go | 34 - .../src/github.com/cenkalti/backoff/ticker.go | 79 - .../cenkalti/backoff/ticker_test.go | 45 - .../codahale/hdrhistogram/.travis.yml | 9 - .../github.com/codahale/hdrhistogram/LICENSE | 21 - .../codahale/hdrhistogram/README.md | 15 - .../github.com/codahale/hdrhistogram/hdr.go | 520 ----- .../codahale/hdrhistogram/hdr_test.go | 344 --- .../codahale/hdrhistogram/window.go | 45 - .../codahale/hdrhistogram/window_test.go | 64 - .../github.com/codegangsta/negroni/LICENSE | 21 - .../github.com/codegangsta/negroni/README.md | 181 -- .../src/github.com/codegangsta/negroni/doc.go | 25 - .../github.com/codegangsta/negroni/logger.go | 29 - .../codegangsta/negroni/logger_test.go | 33 - .../github.com/codegangsta/negroni/negroni.go | 129 -- .../codegangsta/negroni/negroni_test.go | 75 - .../codegangsta/negroni/recovery.go | 46 - .../codegangsta/negroni/recovery_test.go | 28 - .../codegangsta/negroni/response_writer.go | 96 - .../negroni/response_writer_test.go | 150 -- .../github.com/codegangsta/negroni/static.go | 84 - .../codegangsta/negroni/static_test.go | 113 - .../negroni/translations/README_pt_br.md | 170 -- .../elazarl/go-bindata-assetfs/LICENSE | 23 - .../elazarl/go-bindata-assetfs/README.md | 46 - .../elazarl/go-bindata-assetfs/assetfs.go | 147 -- .../elazarl/go-bindata-assetfs/doc.go | 13 - .../go-bindata-assetfs/main.go | 97 - .../fsouza/go-dockerclient/.gitignore | 2 - .../fsouza/go-dockerclient/.travis.yml | 12 - .../github.com/fsouza/go-dockerclient/AUTHORS | 99 - .../fsouza/go-dockerclient/DOCKER-LICENSE | 6 - .../github.com/fsouza/go-dockerclient/LICENSE | 22 - .../fsouza/go-dockerclient/Makefile | 47 - .../fsouza/go-dockerclient/README.markdown | 106 - .../github.com/fsouza/go-dockerclient/auth.go | 138 -- .../fsouza/go-dockerclient/auth_test.go | 91 - .../fsouza/go-dockerclient/build_test.go | 154 -- .../fsouza/go-dockerclient/change.go | 43 - .../fsouza/go-dockerclient/change_test.go | 26 - 
.../fsouza/go-dockerclient/client.go | 847 ------- .../fsouza/go-dockerclient/client_test.go | 423 ---- .../fsouza/go-dockerclient/container.go | 1122 ---------- .../fsouza/go-dockerclient/container_test.go | 1981 ----------------- .../github.com/fsouza/go-dockerclient/env.go | 168 -- .../fsouza/go-dockerclient/env_test.go | 351 --- .../fsouza/go-dockerclient/event.go | 304 --- .../fsouza/go-dockerclient/event_test.go | 132 -- .../fsouza/go-dockerclient/example_test.go | 168 -- .../github.com/fsouza/go-dockerclient/exec.go | 181 -- .../fsouza/go-dockerclient/exec_test.go | 262 --- .../github.com/Sirupsen/logrus/CHANGELOG.md | 26 - .../github.com/Sirupsen/logrus/LICENSE | 21 - .../github.com/Sirupsen/logrus/README.md | 355 --- .../github.com/Sirupsen/logrus/entry.go | 254 --- .../github.com/Sirupsen/logrus/entry_test.go | 53 - .../github.com/Sirupsen/logrus/exported.go | 188 -- .../github.com/Sirupsen/logrus/formatter.go | 48 - .../Sirupsen/logrus/formatter_bench_test.go | 98 - .../github.com/Sirupsen/logrus/hook_test.go | 122 - .../github.com/Sirupsen/logrus/hooks.go | 34 - .../Sirupsen/logrus/json_formatter.go | 41 - .../Sirupsen/logrus/json_formatter_test.go | 120 - .../github.com/Sirupsen/logrus/logger.go | 206 -- .../github.com/Sirupsen/logrus/logrus.go | 94 - .../github.com/Sirupsen/logrus/logrus_test.go | 301 --- .../Sirupsen/logrus/terminal_bsd.go | 9 - .../Sirupsen/logrus/terminal_freebsd.go | 20 - .../Sirupsen/logrus/terminal_linux.go | 12 - .../Sirupsen/logrus/terminal_notwindows.go | 21 - .../Sirupsen/logrus/terminal_openbsd.go | 7 - .../Sirupsen/logrus/terminal_windows.go | 27 - .../Sirupsen/logrus/text_formatter.go | 158 -- .../Sirupsen/logrus/text_formatter_test.go | 61 - .../github.com/Sirupsen/logrus/writer.go | 31 - .../github.com/docker/docker/opts/envfile.go | 62 - .../docker/docker/opts/envfile_test.go | 133 -- .../docker/docker/opts/hosts_unix.go | 7 - .../docker/docker/opts/hosts_windows.go | 7 - .../github.com/docker/docker/opts/ip.go | 35 - .../github.com/docker/docker/opts/ip_test.go | 54 - .../github.com/docker/docker/opts/opts.go | 323 --- .../docker/docker/opts/opts_test.go | 479 ---- .../github.com/docker/docker/opts/ulimit.go | 47 - .../docker/docker/opts/ulimit_test.go | 42 - .../docker/docker/pkg/archive/README.md | 1 - .../docker/docker/pkg/archive/archive.go | 902 -------- .../docker/docker/pkg/archive/archive_test.go | 1204 ---------- .../docker/docker/pkg/archive/archive_unix.go | 89 - .../docker/pkg/archive/archive_unix_test.go | 60 - .../docker/pkg/archive/archive_windows.go | 50 - .../pkg/archive/archive_windows_test.go | 65 - .../docker/docker/pkg/archive/changes.go | 383 ---- .../docker/pkg/archive/changes_linux.go | 285 --- .../docker/pkg/archive/changes_other.go | 97 - .../docker/pkg/archive/changes_posix_test.go | 127 -- .../docker/docker/pkg/archive/changes_test.go | 495 ---- .../docker/docker/pkg/archive/changes_unix.go | 27 - .../docker/pkg/archive/changes_windows.go | 20 - .../docker/docker/pkg/archive/copy.go | 308 --- .../docker/docker/pkg/archive/copy_test.go | 637 ------ .../docker/docker/pkg/archive/copy_unix.go | 11 - .../docker/docker/pkg/archive/copy_windows.go | 9 - .../docker/docker/pkg/archive/diff.go | 210 -- .../docker/docker/pkg/archive/diff_test.go | 190 -- .../docker/pkg/archive/example_changes.go | 97 - .../docker/docker/pkg/archive/time_linux.go | 16 - .../docker/pkg/archive/time_unsupported.go | 16 - .../docker/docker/pkg/archive/utils_test.go | 166 -- .../docker/docker/pkg/archive/wrap.go | 59 - 
.../docker/docker/pkg/archive/wrap_test.go | 98 - .../docker/docker/pkg/fileutils/fileutils.go | 196 -- .../docker/pkg/fileutils/fileutils_test.go | 402 ---- .../docker/docker/pkg/homedir/homedir.go | 39 - .../docker/docker/pkg/homedir/homedir_test.go | 24 - .../docker/docker/pkg/ioutils/fmt.go | 14 - .../docker/docker/pkg/ioutils/fmt_test.go | 17 - .../docker/docker/pkg/ioutils/multireader.go | 226 -- .../docker/pkg/ioutils/multireader_test.go | 149 -- .../docker/docker/pkg/ioutils/readers.go | 254 --- .../docker/docker/pkg/ioutils/readers_test.go | 216 -- .../docker/docker/pkg/ioutils/scheduler.go | 6 - .../docker/pkg/ioutils/scheduler_gccgo.go | 13 - .../docker/docker/pkg/ioutils/writeflusher.go | 47 - .../docker/docker/pkg/ioutils/writers.go | 60 - .../docker/docker/pkg/ioutils/writers_test.go | 65 - .../docker/docker/pkg/mflag/LICENSE | 27 - .../docker/docker/pkg/mflag/README.md | 40 - .../docker/docker/pkg/mflag/flag.go | 1201 ---------- .../docker/docker/pkg/mflag/flag_test.go | 516 ----- .../docker/docker/pkg/parsers/parsers.go | 187 -- .../docker/docker/pkg/parsers/parsers_test.go | 210 -- .../docker/docker/pkg/pools/pools.go | 119 - .../docker/docker/pkg/pools/pools_test.go | 162 -- .../docker/docker/pkg/promise/promise.go | 11 - .../docker/docker/pkg/stdcopy/stdcopy.go | 168 -- .../docker/docker/pkg/stdcopy/stdcopy_test.go | 85 - .../docker/docker/pkg/system/errors.go | 9 - .../docker/pkg/system/events_windows.go | 83 - .../docker/docker/pkg/system/filesys.go | 11 - .../docker/pkg/system/filesys_windows.go | 64 - .../docker/docker/pkg/system/lstat.go | 19 - .../docker/docker/pkg/system/lstat_test.go | 28 - .../docker/docker/pkg/system/lstat_windows.go | 29 - .../docker/docker/pkg/system/meminfo.go | 17 - .../docker/docker/pkg/system/meminfo_linux.go | 71 - .../docker/pkg/system/meminfo_linux_test.go | 38 - .../docker/pkg/system/meminfo_unsupported.go | 7 - .../docker/pkg/system/meminfo_windows.go | 44 - .../docker/docker/pkg/system/mknod.go | 20 - .../docker/docker/pkg/system/mknod_windows.go | 11 - .../docker/docker/pkg/system/stat.go | 46 - .../docker/docker/pkg/system/stat_freebsd.go | 27 - .../docker/docker/pkg/system/stat_linux.go | 33 - .../docker/docker/pkg/system/stat_test.go | 37 - .../docker/pkg/system/stat_unsupported.go | 17 - .../docker/docker/pkg/system/stat_windows.go | 36 - .../docker/docker/pkg/system/umask.go | 11 - .../docker/docker/pkg/system/umask_windows.go | 8 - .../docker/docker/pkg/system/utimes_darwin.go | 11 - .../docker/pkg/system/utimes_freebsd.go | 24 - .../docker/docker/pkg/system/utimes_linux.go | 28 - .../docker/docker/pkg/system/utimes_test.go | 66 - .../docker/pkg/system/utimes_unsupported.go | 13 - .../docker/docker/pkg/system/xattrs_linux.go | 59 - .../docker/pkg/system/xattrs_unsupported.go | 11 - .../docker/docker/pkg/ulimit/ulimit.go | 111 - .../docker/docker/pkg/ulimit/ulimit_test.go | 55 - .../docker/docker/pkg/units/duration.go | 33 - .../docker/docker/pkg/units/duration_test.go | 46 - .../docker/docker/pkg/units/size.go | 95 - .../docker/docker/pkg/units/size_test.go | 108 - .../github.com/docker/docker/volume/volume.go | 61 - .../docker/libcontainer/user/MAINTAINERS | 2 - .../docker/libcontainer/user/lookup.go | 108 - .../docker/libcontainer/user/lookup_unix.go | 30 - .../libcontainer/user/lookup_unsupported.go | 21 - .../docker/libcontainer/user/user.go | 407 ---- .../docker/libcontainer/user/user_test.go | 443 ---- .../github.com/gorilla/context/LICENSE | 27 - .../github.com/gorilla/context/README.md | 7 - 
.../github.com/gorilla/context/context.go | 143 -- .../gorilla/context/context_test.go | 161 -- .../github.com/gorilla/context/doc.go | 82 - .../external/github.com/gorilla/mux/LICENSE | 27 - .../external/github.com/gorilla/mux/README.md | 235 -- .../github.com/gorilla/mux/bench_test.go | 21 - .../external/github.com/gorilla/mux/doc.go | 206 -- .../external/github.com/gorilla/mux/mux.go | 469 ---- .../github.com/gorilla/mux/mux_test.go | 1334 ----------- .../github.com/gorilla/mux/old_test.go | 714 ------ .../external/github.com/gorilla/mux/regexp.go | 317 --- .../external/github.com/gorilla/mux/route.go | 603 ----- .../runc/libcontainer/user/MAINTAINERS | 2 - .../runc/libcontainer/user/lookup.go | 108 - .../runc/libcontainer/user/lookup_unix.go | 30 - .../libcontainer/user/lookup_unsupported.go | 21 - .../runc/libcontainer/user/user.go | 413 ---- .../runc/libcontainer/user/user_test.go | 436 ---- .../fsouza/go-dockerclient/image.go | 546 ----- .../fsouza/go-dockerclient/image_test.go | 971 -------- .../github.com/fsouza/go-dockerclient/misc.go | 59 - .../fsouza/go-dockerclient/misc_test.go | 159 -- .../fsouza/go-dockerclient/network.go | 127 -- .../fsouza/go-dockerclient/network_test.go | 96 - .../fsouza/go-dockerclient/signal.go | 49 - .../github.com/fsouza/go-dockerclient/tar.go | 117 - .../testing/data/.dockerignore | 3 - .../go-dockerclient/testing/data/Dockerfile | 15 - .../go-dockerclient/testing/data/barfile | 0 .../go-dockerclient/testing/data/ca.pem | 18 - .../go-dockerclient/testing/data/cert.pem | 18 - .../testing/data/container.tar | Bin 2048 -> 0 bytes .../testing/data/dockerfile.tar | Bin 2560 -> 0 bytes .../go-dockerclient/testing/data/foofile | 0 .../go-dockerclient/testing/data/key.pem | 27 - .../go-dockerclient/testing/data/server.pem | 18 - .../testing/data/serverkey.pem | 27 - .../fsouza/go-dockerclient/testing/server.go | 1087 --------- .../go-dockerclient/testing/server_test.go | 1843 --------------- .../github.com/fsouza/go-dockerclient/tls.go | 96 - .../fsouza/go-dockerclient/volume.go | 118 - .../fsouza/go-dockerclient/volume_test.go | 142 -- .../gambol99/go-marathon/.gitignore | 38 - .../gambol99/go-marathon/.travis.yml | 13 - .../github.com/gambol99/go-marathon/AUTHORS | 11 - .../github.com/gambol99/go-marathon/LICENSE | 201 -- .../github.com/gambol99/go-marathon/Makefile | 27 - .../github.com/gambol99/go-marathon/README.md | 160 -- .../gambol99/go-marathon/application.go | 498 ----- .../gambol99/go-marathon/application_test.go | 118 - .../github.com/gambol99/go-marathon/client.go | 371 --- .../gambol99/go-marathon/client_test.go | 35 - .../gambol99/go-marathon/cluster.go | 219 -- .../gambol99/go-marathon/cluster_test.go | 67 - .../github.com/gambol99/go-marathon/config.go | 54 - .../github.com/gambol99/go-marathon/const.go | 36 - .../gambol99/go-marathon/deployment.go | 132 -- .../gambol99/go-marathon/deployment_test.go | 45 - .../github.com/gambol99/go-marathon/docker.go | 133 -- .../github.com/gambol99/go-marathon/events.go | 252 --- .../go-marathon/examples/applications/main.go | 110 - .../go-marathon/examples/events/main.go | 91 - .../go-marathon/examples/groups/main.go | 118 - .../examples/multiple_endpoints/main.go | 59 - .../github.com/gambol99/go-marathon/group.go | 185 -- .../gambol99/go-marathon/group_test.go | 62 - .../github.com/gambol99/go-marathon/health.go | 53 - .../github.com/gambol99/go-marathon/info.go | 87 - .../gambol99/go-marathon/info_test.go | 46 - .../gambol99/go-marathon/last_task_failure.go | 26 - 
.../gambol99/go-marathon/subscription.go | 261 --- .../gambol99/go-marathon/subscription_test.go | 32 - .../github.com/gambol99/go-marathon/task.go | 198 -- .../gambol99/go-marathon/task_test.go | 58 - .../gambol99/go-marathon/testing_test.go | 93 - .../go-marathon/tests/rest-api/Gemfile | 4 - .../go-marathon/tests/rest-api/config.ru | 7 - .../go-marathon/tests/rest-api/main.go | 89 - .../go-marathon/tests/rest-api/methods.yml | 902 -------- .../go-marathon/tests/rest-api/rest-api.rb | 33 - .../gambol99/go-marathon/update_strategy.go | 22 - .../github.com/gambol99/go-marathon/utils.go | 104 - .../gambol99/go-marathon/utils_test.go | 37 - .../gambol99/go-marathon/version.go | 19 - .../github.com/gorilla/context/.travis.yml | 9 - .../src/github.com/gorilla/context/LICENSE | 27 - .../src/github.com/gorilla/context/README.md | 7 - .../src/github.com/gorilla/context/context.go | 143 -- .../gorilla/context/context_test.go | 161 -- .../src/github.com/gorilla/context/doc.go | 82 - .../github.com/gorilla/handlers/.travis.yml | 8 - .../src/github.com/gorilla/handlers/LICENSE | 22 - .../src/github.com/gorilla/handlers/README.md | 52 - .../github.com/gorilla/handlers/canonical.go | 71 - .../gorilla/handlers/canonical_test.go | 110 - .../github.com/gorilla/handlers/compress.go | 90 - .../gorilla/handlers/compress_test.go | 87 - .../src/github.com/gorilla/handlers/doc.go | 9 - .../github.com/gorilla/handlers/handlers.go | 388 ---- .../gorilla/handlers/handlers_test.go | 305 --- .../gorilla/handlers/proxy_headers.go | 113 - .../gorilla/handlers/proxy_headers_test.go | 100 - .../src/github.com/gorilla/mux/.travis.yml | 7 - .../src/github.com/gorilla/mux/LICENSE | 27 - .../src/github.com/gorilla/mux/README.md | 7 - .../src/github.com/gorilla/mux/bench_test.go | 21 - .../src/github.com/gorilla/mux/doc.go | 206 -- .../src/github.com/gorilla/mux/mux.go | 465 ---- .../src/github.com/gorilla/mux/mux_test.go | 1195 ---------- .../src/github.com/gorilla/mux/old_test.go | 714 ------ .../src/github.com/gorilla/mux/regexp.go | 295 --- .../src/github.com/gorilla/mux/route.go | 603 ----- .../github.com/hashicorp/consul/api/README.md | 39 - .../github.com/hashicorp/consul/api/acl.go | 140 -- .../github.com/hashicorp/consul/api/agent.go | 335 --- .../github.com/hashicorp/consul/api/api.go | 442 ---- .../hashicorp/consul/api/catalog.go | 182 -- .../github.com/hashicorp/consul/api/event.go | 104 - .../github.com/hashicorp/consul/api/health.go | 136 -- .../src/github.com/hashicorp/consul/api/kv.go | 240 -- .../github.com/hashicorp/consul/api/lock.go | 338 --- .../github.com/hashicorp/consul/api/raw.go | 24 - .../hashicorp/consul/api/semaphore.go | 477 ---- .../hashicorp/consul/api/session.go | 201 -- .../github.com/hashicorp/consul/api/status.go | 43 - .../mailgun/oxy/cbreaker/cbreaker.go | 359 --- .../github.com/mailgun/oxy/cbreaker/effect.go | 78 - .../mailgun/oxy/cbreaker/fallback.go | 56 - .../mailgun/oxy/cbreaker/predicates.go | 232 -- .../github.com/mailgun/oxy/cbreaker/ratio.go | 70 - .../src/github.com/mailgun/oxy/forward/fwd.go | 151 -- .../mailgun/oxy/forward/fwd_test.go | 289 --- .../github.com/mailgun/oxy/forward/headers.go | 31 - .../github.com/mailgun/oxy/forward/rewrite.go | 48 - .../mailgun/oxy/memmetrics/anomaly.go | 99 - .../mailgun/oxy/memmetrics/anomaly_test.go | 158 -- .../mailgun/oxy/memmetrics/counter.go | 155 -- .../mailgun/oxy/memmetrics/counter_test.go | 34 - .../mailgun/oxy/memmetrics/histogram.go | 174 -- .../mailgun/oxy/memmetrics/histogram_test.go | 127 -- .../mailgun/oxy/memmetrics/ratio.go | 
120 - .../mailgun/oxy/memmetrics/ratio_test.go | 170 -- .../mailgun/oxy/memmetrics/roundtrip.go | 228 -- .../mailgun/oxy/memmetrics/roundtrip_test.go | 81 - .../mailgun/oxy/roundrobin/rebalancer.go | 431 ---- .../mailgun/oxy/roundrobin/rebalancer_test.go | 344 --- .../github.com/mailgun/oxy/roundrobin/rr.go | 267 --- .../mailgun/oxy/roundrobin/rr_test.go | 216 -- .../src/github.com/mailgun/oxy/utils/auth.go | 41 - .../github.com/mailgun/oxy/utils/auth_test.go | 62 - .../github.com/mailgun/oxy/utils/handler.go | 38 - .../mailgun/oxy/utils/handler_test.go | 34 - .../github.com/mailgun/oxy/utils/logging.go | 86 - .../github.com/mailgun/oxy/utils/netutils.go | 121 - .../mailgun/oxy/utils/netutils_test.go | 68 - .../github.com/mailgun/oxy/utils/source.go | 57 - .../github.com/mailgun/predicate/.gitignore | 24 - .../src/github.com/mailgun/predicate/LICENSE | 202 -- .../src/github.com/mailgun/predicate/Makefile | 13 - .../github.com/mailgun/predicate/README.md | 53 - .../src/github.com/mailgun/predicate/parse.go | 181 -- .../github.com/mailgun/predicate/predicate.go | 71 - .../src/github.com/mailgun/timetools/LICENSE | 202 -- .../github.com/mailgun/timetools/README.md | 4 - .../github.com/mailgun/timetools/provider.go | 104 - .../mailgun/timetools/provider_test.go | 93 - .../mailgun/timetools/rfc2822time.go | 59 - .../src/github.com/op/go-logging/.travis.yml | 6 - .../src/github.com/op/go-logging/CONTRIBUTORS | 5 - .../src/github.com/op/go-logging/LICENSE | 27 - .../src/github.com/op/go-logging/README.md | 89 - .../src/github.com/op/go-logging/backend.go | 39 - .../github.com/op/go-logging/example_test.go | 40 - .../op/go-logging/examples/example.go | 49 - .../op/go-logging/examples/example.png | Bin 17675 -> 0 bytes .../src/github.com/op/go-logging/format.go | 368 --- .../github.com/op/go-logging/format_test.go | 184 -- .../src/github.com/op/go-logging/level.go | 124 -- .../github.com/op/go-logging/level_test.go | 76 - .../src/github.com/op/go-logging/log.go | 80 - .../src/github.com/op/go-logging/log_test.go | 118 - .../src/github.com/op/go-logging/logger.go | 218 -- .../github.com/op/go-logging/logger_test.go | 53 - .../src/github.com/op/go-logging/memory.go | 237 -- .../github.com/op/go-logging/memory_test.go | 117 - .../src/github.com/op/go-logging/multi.go | 65 - .../github.com/op/go-logging/multi_test.go | 51 - .../src/github.com/op/go-logging/syslog.go | 52 - .../op/go-logging/syslog_fallback.go | 28 - .../src/github.com/thoas/stats/.gitignore | 26 - .../src/github.com/thoas/stats/.travis.yml | 7 - .../src/github.com/thoas/stats/LICENSE | 22 - .../src/github.com/thoas/stats/Makefile | 4 - .../src/github.com/thoas/stats/README.rst | 251 --- .../thoas/stats/examples/beego/server.go | 34 - .../thoas/stats/examples/gin-gonic/server.go | 34 - .../thoas/stats/examples/gocraft/server.go | 46 - .../thoas/stats/examples/goji/server.go | 29 - .../thoas/stats/examples/httprouter/server.go | 22 - .../thoas/stats/examples/martini/server.go | 36 - .../thoas/stats/examples/negroni/server.go | 34 - .../thoas/stats/examples/nethttp/server.go | 16 - .../src/github.com/thoas/stats/recorder.go | 60 - .../src/github.com/thoas/stats/stats.go | 155 -- .../src/github.com/thoas/stats/stats_test.go | 62 - .../src/github.com/tylerb/graceful/.gitignore | 23 - .../src/github.com/tylerb/graceful/LICENSE | 21 - .../src/github.com/tylerb/graceful/README.md | 137 -- .../github.com/tylerb/graceful/graceful.go | 334 --- .../tylerb/graceful/graceful_test.go | 401 ---- .../github.com/tylerb/graceful/tests/main.go | 40 - 
.../src/github.com/unrolled/render/.gitignore | 27 - .../github.com/unrolled/render/.travis.yml | 13 - .../src/github.com/unrolled/render/LICENSE | 20 - .../src/github.com/unrolled/render/README.md | 475 ---- .../src/github.com/unrolled/render/buffer.go | 41 - .../src/github.com/unrolled/render/doc.go | 55 - .../src/github.com/unrolled/render/engine.go | 202 -- .../render/engine_integration_test.go | 69 - .../render/fixtures/amber/example.amber | 8 - .../render/fixtures/amber/layouts/base.amber | 11 - .../render/fixtures/basic/admin/index.tmpl | 1 - .../render/fixtures/basic/another_layout.tmpl | 3 - .../render/fixtures/basic/content.tmpl | 1 - .../render/fixtures/basic/current_layout.tmpl | 3 - .../render/fixtures/basic/delims.tmpl | 1 - .../unrolled/render/fixtures/basic/hello.tmpl | 1 - .../render/fixtures/basic/hypertext.html | 1 - .../render/fixtures/basic/layout.tmpl | 3 - .../render/fixtures/blocks/content.tmpl | 2 - .../render/fixtures/blocks/layout.tmpl | 3 - .../render/fixtures/custom_funcs/index.tmpl | 1 - .../render/fixtures/template-dir-test/0.tmpl | 0 .../dedicated.tmpl/notbad.tmpl | 0 .../fixtures/template-dir-test/subdir/1.tmpl | 0 .../src/github.com/unrolled/render/render.go | 408 ---- .../unrolled/render/render_data_test.go | 44 - .../unrolled/render/render_html_test.go | 336 --- .../unrolled/render/render_json_test.go | 208 -- .../unrolled/render/render_jsonp_test.go | 61 - .../github.com/unrolled/render/render_test.go | 48 - .../unrolled/render/render_text_test.go | 62 - .../unrolled/render/render_xml_test.go | 85 - .../src/golang.org/x/net/netutil/listen.go | 48 - .../golang.org/x/net/netutil/listen_test.go | 106 - .../alecthomas/kingpin.v2/.travis.yml | 3 - .../gopkg.in/alecthomas/kingpin.v2/COPYING | 19 - .../gopkg.in/alecthomas/kingpin.v2/README.md | 538 ----- .../src/gopkg.in/alecthomas/kingpin.v2/app.go | 573 ----- .../alecthomas/kingpin.v2/app_test.go | 197 -- .../gopkg.in/alecthomas/kingpin.v2/args.go | 106 - .../alecthomas/kingpin.v2/args_test.go | 48 - .../src/gopkg.in/alecthomas/kingpin.v2/cmd.go | 135 -- .../alecthomas/kingpin.v2/cmd_test.go | 110 - .../src/gopkg.in/alecthomas/kingpin.v2/doc.go | 68 - .../kingpin.v2/examples/chat1/main.go | 20 - .../kingpin.v2/examples/chat2/main.go | 38 - .../kingpin.v2/examples/curl/main.go | 105 - .../kingpin.v2/examples/modular/main.go | 30 - .../kingpin.v2/examples/ping/main.go | 20 - .../alecthomas/kingpin.v2/examples_test.go | 42 - .../gopkg.in/alecthomas/kingpin.v2/flags.go | 235 -- .../alecthomas/kingpin.v2/flags_test.go | 108 - .../alecthomas/kingpin.v2/genrepeated/main.go | 75 - .../gopkg.in/alecthomas/kingpin.v2/global.go | 88 - .../alecthomas/kingpin.v2/guesswidth.go | 9 - .../alecthomas/kingpin.v2/guesswidth_unix.go | 38 - .../gopkg.in/alecthomas/kingpin.v2/model.go | 217 -- .../gopkg.in/alecthomas/kingpin.v2/parser.go | 339 --- .../alecthomas/kingpin.v2/parser_test.go | 26 - .../gopkg.in/alecthomas/kingpin.v2/parsers.go | 265 --- .../alecthomas/kingpin.v2/parsers_test.go | 90 - .../alecthomas/kingpin.v2/repeated.go | 107 - .../alecthomas/kingpin.v2/repeated.json | 11 - .../alecthomas/kingpin.v2/templates.go | 182 -- .../gopkg.in/alecthomas/kingpin.v2/usage.go | 178 -- .../alecthomas/kingpin.v2/usage_test.go | 67 - .../gopkg.in/alecthomas/kingpin.v2/values.go | 507 ----- .../alecthomas/kingpin.v2/values_test.go | 46 - .../src/gopkg.in/fsnotify.v1/.gitignore | 6 - .../src/gopkg.in/fsnotify.v1/.travis.yml | 15 - .../src/gopkg.in/fsnotify.v1/AUTHORS | 34 - .../src/gopkg.in/fsnotify.v1/CHANGELOG.md | 263 --- 
.../src/gopkg.in/fsnotify.v1/CONTRIBUTING.md | 77 - .../src/gopkg.in/fsnotify.v1/LICENSE | 28 - .../gopkg.in/fsnotify.v1/NotUsed.xcworkspace | 0 .../src/gopkg.in/fsnotify.v1/README.md | 59 - .../src/gopkg.in/fsnotify.v1/circle.yml | 26 - .../src/gopkg.in/fsnotify.v1/example_test.go | 42 - .../src/gopkg.in/fsnotify.v1/fsnotify.go | 62 - .../src/gopkg.in/fsnotify.v1/inotify.go | 306 --- .../gopkg.in/fsnotify.v1/inotify_poller.go | 186 -- .../fsnotify.v1/inotify_poller_test.go | 228 -- .../src/gopkg.in/fsnotify.v1/inotify_test.go | 292 --- .../gopkg.in/fsnotify.v1/integration_test.go | 1135 ---------- .../src/gopkg.in/fsnotify.v1/kqueue.go | 463 ---- .../src/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 - .../gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 - .../src/gopkg.in/fsnotify.v1/windows.go | 561 ----- .../src/gopkg.in/mgo.v2/bson/LICENSE | 25 - .../src/gopkg.in/mgo.v2/bson/bson.go | 705 ------ .../src/gopkg.in/mgo.v2/bson/bson_test.go | 1605 ------------- .../src/gopkg.in/mgo.v2/bson/decode.go | 825 ------- .../src/gopkg.in/mgo.v2/bson/encode.go | 503 ----- 583 files changed, 1 insertion(+), 87909 deletions(-) delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/COMPATIBLE delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/COPYING delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/Makefile delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/README.md delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/doc.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go delete mode 100644 
Godeps/_workspace/src/github.com/BurntSushi/ty/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/COPYING delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/Makefile delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/README.md delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/data/doc.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/data/ordmap.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/data/ordmap_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/doc.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/chan.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/chan_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/doc.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/fun_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/func.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/func_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list_parmap_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/map.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/map_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/rand.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/rand_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/set.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/set_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/sort.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/sort_test.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/fun/util.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/perf/builtin-vs-reflect.bench delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/session.vim delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/type-check.go delete mode 100644 Godeps/_workspace/src/github.com/BurntSushi/ty/tyvars.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/README.md delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/doc.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/example_test.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/examplefiles_test.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/examplefunc_test.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/exec.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/exec_test.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/funcs.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/helper.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/multi_test.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/parse/lex.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/parse/lex_test.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/parse/node.go delete mode 100644 
Godeps/_workspace/src/github.com/alecthomas/template/parse/parse.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/parse/parse_test.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/template.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/testdata/file1.tmpl delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/testdata/file2.tmpl delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/testdata/tmpl1.tmpl delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/template/testdata/tmpl2.tmpl delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/units/COPYING delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/units/README.md delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/units/bytes.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/units/bytes_test.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/units/doc.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/units/si.go delete mode 100644 Godeps/_workspace/src/github.com/alecthomas/units/util.go delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/README.md delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go delete mode 100644 Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go delete mode 100644 Godeps/_workspace/src/github.com/codahale/hdrhistogram/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/codahale/hdrhistogram/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/codahale/hdrhistogram/README.md delete mode 100644 Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr.go delete mode 100644 Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr_test.go delete mode 100644 Godeps/_workspace/src/github.com/codahale/hdrhistogram/window.go delete mode 100644 Godeps/_workspace/src/github.com/codahale/hdrhistogram/window_test.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/README.md delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/doc.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/logger.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/logger_test.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/negroni.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/negroni_test.go delete mode 100644 
Godeps/_workspace/src/github.com/codegangsta/negroni/recovery.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/recovery_test.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/response_writer.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/response_writer_test.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/static.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/static_test.go delete mode 100644 Godeps/_workspace/src/github.com/codegangsta/negroni/translations/README_pt_br.md delete mode 100644 Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md delete mode 100644 Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go delete mode 100644 Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go delete mode 100644 Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go 
delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go delete mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go delete mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go delete mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go delete mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go delete mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network_test.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go delete mode 100644 
Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go delete mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/AUTHORS delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/Makefile delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/README.md delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/application.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/application_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/client.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/client_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/cluster.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/cluster_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/config.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/const.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/deployment.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/deployment_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/docker.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/events.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/applications/main.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/events/main.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/groups/main.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/multiple_endpoints/main.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/group.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/group_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/health.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/info.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/info_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/last_task_failure.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/subscription.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/subscription_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/task.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/task_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/testing_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/Gemfile delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/config.ru delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/main.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/methods.yml delete mode 100644 
Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/rest-api.rb delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/update_strategy.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/utils.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/utils_test.go delete mode 100644 Godeps/_workspace/src/github.com/gambol99/go-marathon/version.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/context/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/gorilla/context/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/gorilla/context/README.md delete mode 100644 Godeps/_workspace/src/github.com/gorilla/context/context.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/context/context_test.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/context/doc.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/README.md delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/canonical.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/canonical_test.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/compress.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/doc.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/proxy_headers.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/handlers/proxy_headers_test.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/README.md delete mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/doc.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/mux.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/old_test.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/regexp.go delete mode 100644 Godeps/_workspace/src/github.com/gorilla/mux/route.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go delete mode 100644 
Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go delete mode 100644 Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/cbreaker.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/effect.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/fallback.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/predicates.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/ratio.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/forward/fwd.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/forward/fwd_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/forward/headers.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/forward/rewrite.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/anomaly.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/anomaly_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/counter.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/counter_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/histogram.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/histogram_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/ratio.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/ratio_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/roundtrip.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/roundtrip_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rebalancer.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rebalancer_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rr.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rr_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/utils/auth.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/utils/auth_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/utils/handler.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/utils/handler_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/utils/logging.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/utils/netutils.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/utils/netutils_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/oxy/utils/source.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/predicate/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/mailgun/predicate/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/mailgun/predicate/Makefile delete mode 100644 Godeps/_workspace/src/github.com/mailgun/predicate/README.md delete mode 100644 Godeps/_workspace/src/github.com/mailgun/predicate/parse.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/predicate/predicate.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/timetools/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/mailgun/timetools/README.md delete mode 100644 Godeps/_workspace/src/github.com/mailgun/timetools/provider.go delete mode 100644 
Godeps/_workspace/src/github.com/mailgun/timetools/provider_test.go delete mode 100644 Godeps/_workspace/src/github.com/mailgun/timetools/rfc2822time.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/CONTRIBUTORS delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/README.md delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/backend.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/example_test.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/examples/example.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/examples/example.png delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/format.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/format_test.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/level.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/level_test.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/log.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/log_test.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/logger.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/logger_test.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/memory.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/memory_test.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/multi.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/multi_test.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/syslog.go delete mode 100644 Godeps/_workspace/src/github.com/op/go-logging/syslog_fallback.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/Makefile delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/README.rst delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/examples/beego/server.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/examples/gin-gonic/server.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/examples/gocraft/server.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/examples/goji/server.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/examples/httprouter/server.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/examples/martini/server.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/examples/negroni/server.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/examples/nethttp/server.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/recorder.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/stats.go delete mode 100644 Godeps/_workspace/src/github.com/thoas/stats/stats_test.go delete mode 100644 Godeps/_workspace/src/github.com/tylerb/graceful/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/tylerb/graceful/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/tylerb/graceful/README.md delete mode 100644 Godeps/_workspace/src/github.com/tylerb/graceful/graceful.go delete mode 100644 
Godeps/_workspace/src/github.com/tylerb/graceful/graceful_test.go delete mode 100644 Godeps/_workspace/src/github.com/tylerb/graceful/tests/main.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/README.md delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/buffer.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/doc.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/engine.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/engine_integration_test.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/amber/example.amber delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/amber/layouts/base.amber delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/basic/admin/index.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/basic/another_layout.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/basic/content.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/basic/current_layout.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/basic/delims.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/basic/hello.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/basic/hypertext.html delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/basic/layout.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/blocks/content.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/blocks/layout.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/custom_funcs/index.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/template-dir-test/0.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/template-dir-test/dedicated.tmpl/notbad.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/fixtures/template-dir-test/subdir/1.tmpl delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/render.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/render_data_test.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/render_html_test.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/render_json_test.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/render_jsonp_test.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/render_test.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/render_text_test.go delete mode 100644 Godeps/_workspace/src/github.com/unrolled/render/render_xml_test.go delete mode 100644 Godeps/_workspace/src/golang.org/x/net/netutil/listen.go delete mode 100644 Godeps/_workspace/src/golang.org/x/net/netutil/listen_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/.travis.yml delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/COPYING delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/README.md delete mode 100644 
Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/app.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/app_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/args.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/args_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/cmd.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/cmd_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/doc.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/chat1/main.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/chat2/main.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/curl/main.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/modular/main.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/ping/main.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/flags.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/flags_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/genrepeated/main.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/global.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/guesswidth.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/model.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/parser.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/parser_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/parsers.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/parsers_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/repeated.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/repeated.json delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/templates.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/usage.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/usage_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values.go delete mode 100644 Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/.gitignore delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/.travis.yml delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/AUTHORS delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/CHANGELOG.md delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/LICENSE delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/NotUsed.xcworkspace delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/README.md delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/circle.yml delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/example_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/fsnotify.go delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify.go delete mode 
100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller.go delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_poller_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/inotify_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/integration_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/kqueue.go delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_bsd.go delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/open_mode_darwin.go delete mode 100644 Godeps/_workspace/src/gopkg.in/fsnotify.v1/windows.go delete mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/LICENSE delete mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson.go delete mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/bson_test.go delete mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/decode.go delete mode 100644 Godeps/_workspace/src/gopkg.in/mgo.v2/bson/encode.go diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore index f037d684e..e6b58bb02 100644 --- a/Godeps/_workspace/.gitignore +++ b/Godeps/_workspace/.gitignore @@ -1,2 +1,3 @@ /pkg /bin +/src \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/.gitignore b/Godeps/_workspace/src/github.com/BurntSushi/toml/.gitignore deleted file mode 100644 index 0cd380037..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -TAGS -tags -.*.swp -tomlcheck/tomlcheck -toml.test diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/.travis.yml b/Godeps/_workspace/src/github.com/BurntSushi/toml/.travis.yml deleted file mode 100644 index 43caf6d02..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -go: - - 1.1 - - 1.2 - - tip -install: - - go install ./... - - go get github.com/BurntSushi/toml-test -script: - - export PATH="$PATH:$HOME/gopath/bin" - - make test - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/COMPATIBLE b/Godeps/_workspace/src/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index 21e0938ca..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1,3 +0,0 @@ -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/COPYING b/Godeps/_workspace/src/github.com/BurntSushi/toml/COPYING deleted file mode 100644 index 5a8e33254..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/Makefile b/Godeps/_workspace/src/github.com/BurntSushi/toml/Makefile deleted file mode 100644 index 3600848d3..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -install: - go install ./... 
- -test: install - go test -v - toml-test toml-test-decoder - toml-test -encoder toml-test-encoder - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md b/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md deleted file mode 100644 index 380bb36bb..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/README.md +++ /dev/null @@ -1,220 +0,0 @@ -## TOML parser and encoder for Go with reflection - -TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. This package also supports the `encoding.TextUnmarshaler` and -`encoding.TextMarshaler` interfaces so that you can define custom data -representations. (There is an example of this below.) - -Spec: https://github.com/mojombo/toml - -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - -Documentation: http://godoc.org/github.com/BurntSushi/toml - -Installation: - -```bash -go get github.com/BurntSushi/toml -``` - -Try the toml validator: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml) - - -### Testing - -This package passes all tests in -[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder -and the encoder. - -### Examples - -This package works similarly to how the Go standard library handles `XML` -and `JSON`. Namely, data is loaded into Go values via reflection. - -For the simplest example, consider some TOML file as just a list of keys -and values: - -```toml -Age = 25 -Cats = [ "Cauchy", "Plato" ] -Pi = 3.14 -Perfection = [ 6, 28, 496, 8128 ] -DOB = 1987-07-05T05:45:00Z -``` - -Which could be defined in Go as: - -```go -type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time // requires `import time` -} -``` - -And then decoded with: - -```go -var conf Config -if _, err := toml.Decode(tomlData, &conf); err != nil { - // handle error -} -``` - -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: - -```toml -some_key_NAME = "wat" -``` - -```go -type TOML struct { - ObscureKey string `toml:"some_key_NAME"` -} -``` - -### Using the `encoding.TextUnmarshaler` interface - -Here's an example that automatically parses duration strings into -`time.Duration` values: - -```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} -``` - -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: - -```go -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} -``` - -### More complex usage - -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML 
document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} -``` - -Note that a case insensitive match will be tried if an exact match can't be -found. - -A working example of the above can be found in `_examples/example.{go,toml}`. - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING deleted file mode 100644 index 5a8e33254..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md deleted file mode 100644 index 24421eb70..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Implements the TOML test suite interface - -This is an implementation of the interface expected by -[toml-test](https://github.com/BurntSushi/toml-test) for my -[toml parser written in Go](https://github.com/BurntSushi/toml). -In particular, it maps TOML data on `stdin` to a JSON format on `stdout`. - - -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - -Compatible with `toml-test` version -[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0) - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go deleted file mode 100644 index 14e755700..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/main.go +++ /dev/null @@ -1,90 +0,0 @@ -// Command toml-test-decoder satisfies the toml-test interface for testing -// TOML decoders. Namely, it accepts TOML on stdin and outputs JSON on stdout. 
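The README excerpt above pairs the spec-page TOML document with hand-written Go types; a minimal sketch of wiring the two together through this package's `DecodeFile` (the file name and the trimmed-down struct below are invented for illustration) might look like this:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// A trimmed-down stand-in for the tomlConfig type shown in the README above.
type tomlConfig struct {
	Title   string
	Servers map[string]struct {
		IP string
		DC string
	}
}

func main() {
	var conf tomlConfig
	// "example.toml" stands in for the spec-page document quoted above.
	if _, err := toml.DecodeFile("example.toml", &conf); err != nil {
		log.Fatal(err)
	}
	// Sub-tables such as [servers.alpha] land in the map, keyed by table name.
	fmt.Println(conf.Title, conf.Servers["alpha"].IP)
}
```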
-package main - -import ( - "encoding/json" - "flag" - "fmt" - "log" - "os" - "path" - "time" - - "github.com/BurntSushi/toml" -) - -func init() { - log.SetFlags(0) - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s < toml-file\n", path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() != 0 { - flag.Usage() - } - - var tmp interface{} - if _, err := toml.DecodeReader(os.Stdin, &tmp); err != nil { - log.Fatalf("Error decoding TOML: %s", err) - } - - typedTmp := translate(tmp) - if err := json.NewEncoder(os.Stdout).Encode(typedTmp); err != nil { - log.Fatalf("Error encoding JSON: %s", err) - } -} - -func translate(tomlData interface{}) interface{} { - switch orig := tomlData.(type) { - case map[string]interface{}: - typed := make(map[string]interface{}, len(orig)) - for k, v := range orig { - typed[k] = translate(v) - } - return typed - case []map[string]interface{}: - typed := make([]map[string]interface{}, len(orig)) - for i, v := range orig { - typed[i] = translate(v).(map[string]interface{}) - } - return typed - case []interface{}: - typed := make([]interface{}, len(orig)) - for i, v := range orig { - typed[i] = translate(v) - } - - // We don't really need to tag arrays, but let's be future proof. - // (If TOML ever supports tuples, we'll need this.) - return tag("array", typed) - case time.Time: - return tag("datetime", orig.Format("2006-01-02T15:04:05Z")) - case bool: - return tag("bool", fmt.Sprintf("%v", orig)) - case int64: - return tag("integer", fmt.Sprintf("%d", orig)) - case float64: - return tag("float", fmt.Sprintf("%v", orig)) - case string: - return tag("string", orig) - } - - panic(fmt.Sprintf("Unknown type: %T", tomlData)) -} - -func tag(typeName string, data interface{}) map[string]interface{} { - return map[string]interface{}{ - "type": typeName, - "value": data, - } -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING deleted file mode 100644 index 5a8e33254..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md deleted file mode 100644 index 45a603f29..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# Implements the TOML test suite interface for TOML encoders - -This is an implementation of the interface expected by -[toml-test](https://github.com/BurntSushi/toml-test) for the -[TOML encoder](https://github.com/BurntSushi/toml). -In particular, it maps JSON data on `stdin` to a TOML format on `stdout`. 
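The `translate`/`tag` helpers above wrap every decoded value in a `{"type": ..., "value": ...}` object so that toml-test can compare decoders independently of language. A small, hypothetical illustration of the shape produced for a one-key document (key and value invented here):

```go
package main

import (
	"encoding/json"
	"os"
)

func main() {
	// For a TOML document such as `answer = 42`, the decoder above emits
	// tagged JSON of roughly this shape on stdout.
	tagged := map[string]interface{}{
		"answer": map[string]interface{}{"type": "integer", "value": "42"},
	}
	json.NewEncoder(os.Stdout).Encode(tagged) // {"answer":{"type":"integer","value":"42"}}
}
```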
- - -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - -Compatible with `toml-test` version -[v0.2.0](https://github.com/BurntSushi/toml-test/tree/v0.2.0) - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go deleted file mode 100644 index 092cc6844..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/main.go +++ /dev/null @@ -1,131 +0,0 @@ -// Command toml-test-encoder satisfies the toml-test interface for testing -// TOML encoders. Namely, it accepts JSON on stdin and outputs TOML on stdout. -package main - -import ( - "encoding/json" - "flag" - "log" - "os" - "path" - "strconv" - "time" - - "github.com/BurntSushi/toml" -) - -func init() { - log.SetFlags(0) - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s < json-file\n", path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() != 0 { - flag.Usage() - } - - var tmp interface{} - if err := json.NewDecoder(os.Stdin).Decode(&tmp); err != nil { - log.Fatalf("Error decoding JSON: %s", err) - } - - tomlData := translate(tmp) - if err := toml.NewEncoder(os.Stdout).Encode(tomlData); err != nil { - log.Fatalf("Error encoding TOML: %s", err) - } -} - -func translate(typedJson interface{}) interface{} { - switch v := typedJson.(type) { - case map[string]interface{}: - if len(v) == 2 && in("type", v) && in("value", v) { - return untag(v) - } - m := make(map[string]interface{}, len(v)) - for k, v2 := range v { - m[k] = translate(v2) - } - return m - case []interface{}: - tabArray := make([]map[string]interface{}, len(v)) - for i := range v { - if m, ok := translate(v[i]).(map[string]interface{}); ok { - tabArray[i] = m - } else { - log.Fatalf("JSON arrays may only contain objects. 
This " + - "corresponds to only tables being allowed in " + - "TOML table arrays.") - } - } - return tabArray - } - log.Fatalf("Unrecognized JSON format '%T'.", typedJson) - panic("unreachable") -} - -func untag(typed map[string]interface{}) interface{} { - t := typed["type"].(string) - v := typed["value"] - switch t { - case "string": - return v.(string) - case "integer": - v := v.(string) - n, err := strconv.Atoi(v) - if err != nil { - log.Fatalf("Could not parse '%s' as integer: %s", v, err) - } - return n - case "float": - v := v.(string) - f, err := strconv.ParseFloat(v, 64) - if err != nil { - log.Fatalf("Could not parse '%s' as float64: %s", v, err) - } - return f - case "datetime": - v := v.(string) - t, err := time.Parse("2006-01-02T15:04:05Z", v) - if err != nil { - log.Fatalf("Could not parse '%s' as a datetime: %s", v, err) - } - return t - case "bool": - v := v.(string) - switch v { - case "true": - return true - case "false": - return false - } - log.Fatalf("Could not parse '%s' as a boolean.", v) - case "array": - v := v.([]interface{}) - array := make([]interface{}, len(v)) - for i := range v { - if m, ok := v[i].(map[string]interface{}); ok { - array[i] = untag(m) - } else { - log.Fatalf("Arrays may only contain other arrays or "+ - "primitive values, but found a '%T'.", m) - } - } - return array - } - log.Fatalf("Unrecognized tag type '%s'.", t) - panic("unreachable") -} - -func in(key string, m map[string]interface{}) bool { - _, ok := m[key] - return ok -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING deleted file mode 100644 index 5a8e33254..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md deleted file mode 100644 index 5df0dc32b..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# TOML Validator - -If Go is installed, it's simple to try it out: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -You can see the types of every key in a TOML file with: - -```bash -tomlv -types some-toml-file.toml -``` - -At the moment, only one error message is reported at a time. Error messages -include line numbers. No output means that the files given are valid TOML, or -there is a bug in `tomlv`. - -Compatible with TOML version -[v0.1.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.1.0.md) - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go deleted file mode 100644 index c7d689a7e..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/cmd/tomlv/main.go +++ /dev/null @@ -1,61 +0,0 @@ -// Command tomlv validates TOML documents and prints each key's type. 
-package main - -import ( - "flag" - "fmt" - "log" - "os" - "path" - "strings" - "text/tabwriter" - - "github.com/BurntSushi/toml" -) - -var ( - flagTypes = false -) - -func init() { - log.SetFlags(0) - - flag.BoolVar(&flagTypes, "types", flagTypes, - "When set, the types of every defined key will be shown.") - - flag.Usage = usage - flag.Parse() -} - -func usage() { - log.Printf("Usage: %s toml-file [ toml-file ... ]\n", - path.Base(os.Args[0])) - flag.PrintDefaults() - - os.Exit(1) -} - -func main() { - if flag.NArg() < 1 { - flag.Usage() - } - for _, f := range flag.Args() { - var tmp interface{} - md, err := toml.DecodeFile(f, &tmp) - if err != nil { - log.Fatalf("Error in '%s': %s", f, err) - } - if flagTypes { - printTypes(md) - } - } -} - -func printTypes(md toml.MetaData) { - tabw := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) - for _, key := range md.Keys() { - fmt.Fprintf(tabw, "%s%s\t%s\n", - strings.Repeat(" ", len(key)-1), key, md.Type(key...)) - } - tabw.Flush() -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go deleted file mode 100644 index b6d75d042..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/decode.go +++ /dev/null @@ -1,472 +0,0 @@ -package toml - -import ( - "fmt" - "io" - "io/ioutil" - "math" - "reflect" - "strings" - "time" -) - -var e = fmt.Errorf - -// Primitive is a TOML value that hasn't been decoded into a Go value. -// When using the various `Decode*` functions, the type `Primitive` may -// be given to any value, and its decoding will be delayed. -// -// A `Primitive` value can be decoded using the `PrimitiveDecode` function. -// -// The underlying representation of a `Primitive` value is subject to change. -// Do not rely on it. -// -// N.B. Primitive values are still parsed, so using them will only avoid -// the overhead of reflection. They can be useful when you don't know the -// exact type of TOML data until run time. -type Primitive struct { - undecoded interface{} - context Key -} - -// DEPRECATED! -// -// Use MetaData.PrimitiveDecode instead. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]bool)} - return md.unify(primValue.undecoded, rvalue(v)) -} - -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) -// -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// Decode will decode the contents of `data` in TOML format into a pointer -// `v`. -// -// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be -// used interchangeably.) -// -// TOML arrays of tables correspond to either a slice of structs or a slice -// of maps. -// -// TOML datetimes correspond to Go `time.Time` values. 
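The `Primitive` type documented above defers decoding until the concrete Go type is known at run time. A brief sketch of pairing it with `MetaData.PrimitiveDecode` (the document, struct, and key names are invented for illustration):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	blob := `
kind = "ints"
payload = [1, 2, 3]
`
	var doc struct {
		Kind    string
		Payload toml.Primitive // left undecoded on the first pass
	}
	md, err := toml.Decode(blob, &doc)
	if err != nil {
		log.Fatal(err)
	}
	// Only now do we decide how to interpret the payload.
	if doc.Kind == "ints" {
		var ints []int
		if err := md.PrimitiveDecode(doc.Payload, &ints); err != nil {
			log.Fatal(err)
		}
		fmt.Println(ints) // [1 2 3]
	}
}
```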
-// -// All other TOML types (float, string, int, bool and array) correspond -// to the obvious Go types. -// -// An exception to the above rules is if a type implements the -// encoding.TextUnmarshaler interface. In this case, any primitive TOML value -// (floats, strings, integers, booleans and datetimes) will be converted to -// a byte string and given to the value's UnmarshalText method. See the -// Unmarshaler example for a demonstration with time duration strings. -// -// Key mapping -// -// TOML keys can map to either keys in a Go map or field names in a Go -// struct. The special `toml` struct tag may be used to map TOML keys to -// struct fields that don't match the key name exactly. (See the example.) -// A case insensitive match to struct names will be tried if an exact match -// can't be found. -// -// The mapping between TOML values and Go values is loose. That is, there -// may exist TOML values that cannot be placed into your representation, and -// there may be parts of your representation that do not correspond to -// TOML values. This loose mapping can be made stricter by using the IsDefined -// and/or Undecoded methods on the MetaData returned. -// -// This decoder will not handle cyclic types. If a cyclic type is passed, -// `Decode` will not terminate. -func Decode(data string, v interface{}) (MetaData, error) { - p, err := parse(data) - if err != nil { - return MetaData{}, err - } - md := MetaData{ - p.mapping, p.types, p.ordered, - make(map[string]bool, len(p.ordered)), nil, - } - return md, md.unify(p.mapping, rvalue(v)) -} - -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at `fpath` and decode it for you. -func DecodeFile(fpath string, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadFile(fpath) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// DecodeReader is just like Decode, except it will consume all bytes -// from the reader and decode it for you. -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadAll(r) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// unify performs a sort of type unification based on the structure of `rv`, -// which is the client representation. -// -// Any type mismatch produces an error. Finding a type that we don't know -// how to handle produces an unsupported type error. -func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - // Special case. Look for a `Primitive` value. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { - // Save the undecoded data and the key context into the primitive - // value. - context := make(Key, len(md.context)) - copy(context, md.context) - rv.Set(reflect.ValueOf(Primitive{ - undecoded: data, - context: context, - })) - return nil - } - - // Special case. Handle time.Time values specifically. - // TODO: Remove this code when we decide to drop support for Go 1.1. - // This isn't necessary in Go 1.2 because time.Time satisfies the encoding - // interfaces. - if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { - return md.unifyDatetime(data, rv) - } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(TextUnmarshaler); ok { - return md.unifyText(data, v) - } - // BUG(burntsushi) - // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML - // hash or array. 
In particular, the unmarshaler should only be applied - // to primitive TOML values. But at this point, it will be applied to - // all kinds of values and produce an incorrect error whenever those values - // are hashes or arrays (including arrays of tables). - - k := rv.Kind() - - // laziness - if k >= reflect.Int && k <= reflect.Uint64 { - return md.unifyInt(data, rv) - } - switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil - case reflect.Struct: - return md.unifyStruct(data, rv) - case reflect.Map: - return md.unifyMap(data, rv) - case reflect.Array: - return md.unifyArray(data, rv) - case reflect.Slice: - return md.unifySlice(data, rv) - case reflect.String: - return md.unifyString(data, rv) - case reflect.Bool: - return md.unifyBool(data, rv) - case reflect.Interface: - // we only support empty interfaces. - if rv.NumMethod() > 0 { - return e("Unsupported type '%s'.", rv.Kind()) - } - return md.unifyAnything(data, rv) - case reflect.Float32: - fallthrough - case reflect.Float64: - return md.unifyFloat64(data, rv) - } - return e("Unsupported type '%s'.", rv.Kind()) -} - -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - return mismatch(rv, "map", mapping) - } - - for key, datum := range tmap { - var f *field - fields := cachedTypeFields(rv.Type()) - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - if f != nil { - subv := rv - for _, i := range f.index { - subv = indirect(subv.Field(i)) - } - if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = true - md.context = append(md.context, key) - if err := md.unify(datum, subv); err != nil { - return e("Type mismatch for '%s.%s': %s", - rv.Type().String(), f.name, err) - } - md.context = md.context[0 : len(md.context)-1] - } else if f.name != "" { - // Bad user! No soup for you! 
- return e("Field '%s.%s' is unexported, and therefore cannot "+ - "be loaded with reflection.", rv.Type().String(), f.name) - } - } - } - return nil -} - -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - return badtype("map", mapping) - } - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true - md.context = append(md.context, k) - - rvkey := indirect(reflect.New(rv.Type().Key())) - rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - - rvkey.SetString(k) - rv.SetMapIndex(rvkey, rvval) - } - return nil -} - -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - return badtype("slice", data) - } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - return badtype("slice", data) - } - sliceLen := datav.Len() - if rv.IsNil() { - rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen)) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { - return err - } - } - return nil -} - -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) - return nil - } - return badtype("time.Time", data) -} - -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { - if s, ok := data.(string); ok { - rv.SetString(s) - return nil - } - return badtype("string", data) -} - -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { - if num, ok := data.(float64); ok { - switch rv.Kind() { - case reflect.Float32: - fallthrough - case reflect.Float64: - rv.SetFloat(num) - default: - panic("bug") - } - return nil - } - return badtype("float", data) -} - -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("Value '%d' is out of range for int8.", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("Value '%d' is out of range for int16.", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("Value '%d' is out of range for int32.", num) - } - } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. 
- case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("Value '%d' is out of range for uint8.", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("Value '%d' is out of range for uint16.", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("Value '%d' is out of range for uint32.", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") - } - return nil - } - return badtype("integer", data) -} - -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { - if b, ok := data.(bool); ok { - rv.SetBool(b) - return nil - } - return badtype("boolean", data) -} - -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { - rv.Set(reflect.ValueOf(data)) - return nil -} - -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { - var s string - switch sdata := data.(type) { - case TextMarshaler: - text, err := sdata.MarshalText() - if err != nil { - return err - } - s = string(text) - case fmt.Stringer: - s = sdata.String() - case string: - s = sdata - case bool: - s = fmt.Sprintf("%v", sdata) - case int64: - s = fmt.Sprintf("%d", sdata) - case float64: - s = fmt.Sprintf("%f", sdata) - default: - return badtype("primitive (string-like)", data) - } - if err := v.UnmarshalText([]byte(s)); err != nil { - return err - } - return nil -} - -// rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { - return indirect(reflect.ValueOf(v)) -} - -// indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. -// -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). -func indirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr { - if v.CanAddr() { - pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { - return pv - } - } - return v - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return indirect(reflect.Indirect(v)) -} - -func isUnifiable(rv reflect.Value) bool { - if rv.CanSet() { - return true - } - if _, ok := rv.Interface().(TextUnmarshaler); ok { - return true - } - return false -} - -func badtype(expected string, data interface{}) error { - return e("Expected %s but found '%T'.", expected, data) -} - -func mismatch(user reflect.Value, expected string, data interface{}) error { - return e("Type mismatch for %s. Expected %s but found '%T'.", - user.Type().String(), expected, data) -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go deleted file mode 100644 index c8114453b..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_meta.go +++ /dev/null @@ -1,99 +0,0 @@ -package toml - -import "strings" - -// MetaData allows access to meta information about TOML data that may not -// be inferrable via reflection. In particular, whether a key has been defined -// and the TOML type of a key. -type MetaData struct { - mapping map[string]interface{} - types map[string]tomlType - keys []Key - decoded map[string]bool - context Key // Used only during decoding. -} - -// IsDefined returns true if the key given exists in the TOML data. The key -// should be specified hierarchially. 
e.g., -// -// // access the TOML key 'a.b.c' -// IsDefined("a", "b", "c") -// -// IsDefined will return false if an empty key given. Keys are case sensitive. -func (md *MetaData) IsDefined(key ...string) bool { - if len(key) == 0 { - return false - } - - var hash map[string]interface{} - var ok bool - var hashOrVal interface{} = md.mapping - for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { - return false - } - if hashOrVal, ok = hash[k]; !ok { - return false - } - } - return true -} - -// Type returns a string representation of the type of the key specified. -// -// Type will return the empty string if given an empty key or a key that -// does not exist. Keys are case sensitive. -func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { - return typ.typeString() - } - return "" -} - -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - -// Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. -// -// The list will have the same order as the keys appeared in the TOML data. -// -// All keys returned are non-empty. -func (md *MetaData) Keys() []Key { - return md.keys -} - -// Undecoded returns all keys that have not been decoded in the order in which -// they appear in the original TOML document. -// -// This includes keys that haven't been decoded because of a Primitive value. -// Once the Primitive value is decoded, the keys will be considered decoded. -// -// Also note that decoding into an empty interface will result in no decoding, -// and so no keys will be considered decoded. -// -// In this sense, the Undecoded keys correspond to keys in the TOML document -// that do not have a concrete type in your representation. 
-func (md *MetaData) Undecoded() []Key { - undecoded := make([]Key, 0, len(md.keys)) - for _, key := range md.keys { - if !md.decoded[key.String()] { - undecoded = append(undecoded, key) - } - } - return undecoded -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go deleted file mode 100644 index b940333dc..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/decode_test.go +++ /dev/null @@ -1,540 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "reflect" - "testing" - "time" -) - -func init() { - log.SetFlags(0) -} - -func TestDecodeSimple(t *testing.T) { - var testSimple = ` -age = 250 -andrew = "gallant" -kait = "brady" -now = 1987-07-05T05:45:00Z -yesOrNo = true -pi = 3.14 -colors = [ - ["red", "green", "blue"], - ["cyan", "magenta", "yellow", "black"], -] - -[My.Cats] -plato = "cat 1" -cauchy = "cat 2" -` - - type cats struct { - Plato string - Cauchy string - } - type simple struct { - Age int - Colors [][]string - Pi float64 - YesOrNo bool - Now time.Time - Andrew string - Kait string - My map[string]cats - } - - var val simple - _, err := Decode(testSimple, &val) - if err != nil { - t.Fatal(err) - } - - now, err := time.Parse("2006-01-02T15:04:05", "1987-07-05T05:45:00") - if err != nil { - panic(err) - } - var answer = simple{ - Age: 250, - Andrew: "gallant", - Kait: "brady", - Now: now, - YesOrNo: true, - Pi: 3.14, - Colors: [][]string{ - {"red", "green", "blue"}, - {"cyan", "magenta", "yellow", "black"}, - }, - My: map[string]cats{ - "Cats": cats{Plato: "cat 1", Cauchy: "cat 2"}, - }, - } - if !reflect.DeepEqual(val, answer) { - t.Fatalf("Expected\n-----\n%#v\n-----\nbut got\n-----\n%#v\n", - answer, val) - } -} - -func TestDecodeEmbedded(t *testing.T) { - type Dog struct{ Name string } - type Age int - - tests := map[string]struct { - input string - decodeInto interface{} - wantDecoded interface{} - }{ - "embedded struct": { - input: `Name = "milton"`, - decodeInto: &struct{ Dog }{}, - wantDecoded: &struct{ Dog }{Dog{"milton"}}, - }, - "embedded non-nil pointer to struct": { - input: `Name = "milton"`, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{&Dog{"milton"}}, - }, - "embedded nil pointer to struct": { - input: ``, - decodeInto: &struct{ *Dog }{}, - wantDecoded: &struct{ *Dog }{nil}, - }, - "embedded int": { - input: `Age = -5`, - decodeInto: &struct{ Age }{}, - wantDecoded: &struct{ Age }{-5}, - }, - } - - for label, test := range tests { - _, err := Decode(test.input, test.decodeInto) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(test.wantDecoded, test.decodeInto) { - t.Errorf("%s: want decoded == %+v, got %+v", - label, test.wantDecoded, test.decodeInto) - } - } -} - -func TestTableArrays(t *testing.T) { - var tomlTableArrays = ` -[[albums]] -name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] -name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - - type Song struct { - Name string - } - - type Album struct { - Name string - Songs []Song - } - - type Music struct { - Albums []Album - } - - expected := Music{[]Album{ - {"Born to Run", []Song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", []Song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }} - var got Music - if _, err := Decode(tomlTableArrays, &got); err != nil { - t.Fatal(err) - } - if 
!reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -// Case insensitive matching tests. -// A bit more comprehensive than needed given the current implementation, -// but implementations change. -// Probably still missing demonstrations of some ugly corner cases regarding -// case insensitive matching and multiple fields. -func TestCase(t *testing.T) { - var caseToml = ` -tOpString = "string" -tOpInt = 1 -tOpFloat = 1.1 -tOpBool = true -tOpdate = 2006-01-02T15:04:05Z -tOparray = [ "array" ] -Match = "i should be in Match only" -MatcH = "i should be in MatcH only" -once = "just once" -[nEst.eD] -nEstedString = "another string" -` - - type InsensitiveEd struct { - NestedString string - } - - type InsensitiveNest struct { - Ed InsensitiveEd - } - - type Insensitive struct { - TopString string - TopInt int - TopFloat float64 - TopBool bool - TopDate time.Time - TopArray []string - Match string - MatcH string - Once string - OncE string - Nest InsensitiveNest - } - - tme, err := time.Parse(time.RFC3339, time.RFC3339[:len(time.RFC3339)-5]) - if err != nil { - panic(err) - } - expected := Insensitive{ - TopString: "string", - TopInt: 1, - TopFloat: 1.1, - TopBool: true, - TopDate: tme, - TopArray: []string{"array"}, - MatcH: "i should be in MatcH only", - Match: "i should be in Match only", - Once: "just once", - OncE: "", - Nest: InsensitiveNest{ - Ed: InsensitiveEd{NestedString: "another string"}, - }, - } - var got Insensitive - if _, err := Decode(caseToml, &got); err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, got) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, got) - } -} - -func TestPointers(t *testing.T) { - type Object struct { - Type string - Description string - } - - type Dict struct { - NamedObject map[string]*Object - BaseObject *Object - Strptr *string - Strptrs []*string - } - s1, s2, s3 := "blah", "abc", "def" - expected := &Dict{ - Strptr: &s1, - Strptrs: []*string{&s2, &s3}, - NamedObject: map[string]*Object{ - "foo": {"FOO", "fooooo!!!"}, - "bar": {"BAR", "ba-ba-ba-ba-barrrr!!!"}, - }, - BaseObject: &Object{"BASE", "da base"}, - } - - ex1 := ` -Strptr = "blah" -Strptrs = ["abc", "def"] - -[NamedObject.foo] -Type = "FOO" -Description = "fooooo!!!" - -[NamedObject.bar] -Type = "BAR" -Description = "ba-ba-ba-ba-barrrr!!!" 
- -[BaseObject] -Type = "BASE" -Description = "da base" -` - dict := new(Dict) - _, err := Decode(ex1, dict) - if err != nil { - t.Errorf("Decode error: %v", err) - } - if !reflect.DeepEqual(expected, dict) { - t.Fatalf("\n%#v\n!=\n%#v\n", expected, dict) - } -} - -type sphere struct { - Center [3]float64 - Radius float64 -} - -func TestDecodeSimpleArray(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.0, 1.5, 0.0]`, &s1); err != nil { - t.Fatal(err) - } -} - -func TestDecodeArrayWrongSize(t *testing.T) { - var s1 sphere - if _, err := Decode(`center = [0.1, 2.3]`, &s1); err == nil { - t.Fatal("Expected array type mismatch error") - } -} - -func TestDecodeLargeIntoSmallInt(t *testing.T) { - type table struct { - Value int8 - } - var tab table - if _, err := Decode(`value = 500`, &tab); err == nil { - t.Fatal("Expected integer out-of-bounds error.") - } -} - -func TestDecodeSizedInts(t *testing.T) { - type table struct { - U8 uint8 - U16 uint16 - U32 uint32 - U64 uint64 - U uint - I8 int8 - I16 int16 - I32 int32 - I64 int64 - I int - } - answer := table{1, 1, 1, 1, 1, -1, -1, -1, -1, -1} - toml := ` - u8 = 1 - u16 = 1 - u32 = 1 - u64 = 1 - u = 1 - i8 = -1 - i16 = -1 - i32 = -1 - i64 = -1 - i = -1 - ` - var tab table - if _, err := Decode(toml, &tab); err != nil { - t.Fatal(err.Error()) - } - if answer != tab { - t.Fatalf("Expected %#v but got %#v", answer, tab) - } -} - -func ExampleMetaData_PrimitiveDecode() { - var md MetaData - var err error - - var tomlBlob = ` -ranking = ["Springsteen", "J Geils"] - -[bands.Springsteen] -started = 1973 -albums = ["Greetings", "WIESS", "Born to Run", "Darkness"] - -[bands.J Geils] -started = 1970 -albums = ["The J. Geils Band", "Full House", "Blow Your Face Out"] -` - - type band struct { - Started int - Albums []string - } - type classics struct { - Ranking []string - Bands map[string]Primitive - } - - // Do the initial decode. Reflection is delayed on Primitive values. - var music classics - if md, err = Decode(tomlBlob, &music); err != nil { - log.Fatal(err) - } - - // MetaData still includes information on Primitive values. - fmt.Printf("Is `bands.Springsteen` defined? %v\n", - md.IsDefined("bands", "Springsteen")) - - // Decode primitive data into Go values. - for _, artist := range music.Ranking { - // A band is a primitive value, so we need to decode it to get a - // real `band` value. - primValue := music.Bands[artist] - - var aBand band - if err = md.PrimitiveDecode(primValue, &aBand); err != nil { - log.Fatal(err) - } - fmt.Printf("%s started in %d.\n", artist, aBand.Started) - } - // Check to see if there were any fields left undecoded. - // Note that this won't be empty before decoding the Primitive value! - fmt.Printf("Undecoded: %q\n", md.Undecoded()) - - // Output: - // Is `bands.Springsteen` defined? true - // Springsteen started in 1973. - // J Geils started in 1970. - // Undecoded: [] -} - -func ExampleDecode() { - var tomlBlob = ` -# Some comments. 
-[alpha] -ip = "10.0.0.1" - - [alpha.config] - Ports = [ 8001, 8002 ] - Location = "Toronto" - Created = 1987-07-05T05:45:00Z - -[beta] -ip = "10.0.0.2" - - [beta.config] - Ports = [ 9001, 9002 ] - Location = "New Jersey" - Created = 1887-01-05T05:55:00Z -` - - type serverConfig struct { - Ports []int - Location string - Created time.Time - } - - type server struct { - IP string `toml:"ip"` - Config serverConfig `toml:"config"` - } - - type servers map[string]server - - var config servers - if _, err := Decode(tomlBlob, &config); err != nil { - log.Fatal(err) - } - - for _, name := range []string{"alpha", "beta"} { - s := config[name] - fmt.Printf("Server: %s (ip: %s) in %s created on %s\n", - name, s.IP, s.Config.Location, - s.Config.Created.Format("2006-01-02")) - fmt.Printf("Ports: %v\n", s.Config.Ports) - } - - // Output: - // Server: alpha (ip: 10.0.0.1) in Toronto created on 1987-07-05 - // Ports: [8001 8002] - // Server: beta (ip: 10.0.0.2) in New Jersey created on 1887-01-05 - // Ports: [9001 9002] -} - -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} - -// Example Unmarshaler shows how to decode TOML strings into your own -// custom data type. -func Example_unmarshaler() { - blob := ` -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -` - type song struct { - Name string - Duration duration - } - type songs struct { - Song []song - } - var favorites songs - if _, err := Decode(blob, &favorites); err != nil { - log.Fatal(err) - } - - // Code to implement the TextUnmarshaler interface for `duration`: - // - // type duration struct { - // time.Duration - // } - // - // func (d *duration) UnmarshalText(text []byte) error { - // var err error - // d.Duration, err = time.ParseDuration(string(text)) - // return err - // } - - for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) - } - // Output: - // Thunder Road (4m49s) - // Stairway to Heaven (8m3s) -} - -// Example StrictDecoding shows how to detect whether there are keys in the -// TOML document that weren't decoded into the value given. This is useful -// for returning an error to the user if they've included extraneous fields -// in their configuration. -func Example_strictDecoding() { - var blob = ` -key1 = "value1" -key2 = "value2" -key3 = "value3" -` - type config struct { - Key1 string - Key3 string - } - - var conf config - md, err := Decode(blob, &conf) - if err != nil { - log.Fatal(err) - } - fmt.Printf("Undecoded keys: %q\n", md.Undecoded()) - // Output: - // Undecoded keys: ["key2"] -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/doc.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/doc.go deleted file mode 100644 index fe2680004..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Package toml provides facilities for decoding and encoding TOML configuration -files via reflection. There is also support for delaying decoding with -the Primitive type, and querying the set of keys in a TOML document with the -MetaData type. - -The specification implemented: https://github.com/mojombo/toml - -The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify -whether a file is a valid TOML document. It can also be used to print the -type of each key in a TOML document. 
- -Testing - -There are two important types of tests used for this package. The first is -contained inside '*_test.go' files and uses the standard Go unit testing -framework. These tests are primarily devoted to holistically testing the -decoder and encoder. - -The second type of testing is used to verify the implementation's adherence -to the TOML specification. These tests have been factored into their own -project: https://github.com/BurntSushi/toml-test - -The reason the tests are in a separate project is so that they can be used by -any implementation of TOML. Namely, it is language agnostic. -*/ -package toml diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go deleted file mode 100644 index 6dd696f58..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/encode.go +++ /dev/null @@ -1,521 +0,0 @@ -package toml - -import ( - "bufio" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" - "unicode" -) - -type tomlEncodeError struct{ error } - -var ( - errArrayMixedElementTypes = errors.New( - "can't encode array with mixed element types") - errArrayNilElement = errors.New( - "can't encode array with nil element") - errNonString = errors.New( - "can't encode a map with non-string key type") - errAnonNonStruct = errors.New( - "can't encode an anonymous field that is not a struct") - errArrayNoTable = errors.New( - "TOML array element can't contain a table") - errNoKey = errors.New( - "top-level values must be a Go map or struct") - errAnything = errors.New("") // used in testing -) - -var quotedReplacer = strings.NewReplacer( - "\t", "\\t", - "\n", "\\n", - "\r", "\\r", - "\"", "\\\"", - "\\", "\\\\", -) - -// Encoder controls the encoding of Go values to a TOML document to some -// io.Writer. -// -// The indentation level can be controlled with the Indent field. -type Encoder struct { - // A single indentation level. By default it is two spaces. - Indent string - - // hasWritten is whether we have written any output to w yet. - hasWritten bool - w *bufio.Writer -} - -// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer -// given. By default, a single indentation level is 2 spaces. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } -} - -// Encode writes a TOML representation of the Go value to the underlying -// io.Writer. If the value given cannot be encoded to a valid TOML document, -// then an error is returned. -// -// The mapping between Go values and TOML values should be precisely the same -// as for the Decode* functions. Similarly, the TextMarshaler interface is -// supported by encoding the resulting bytes as strings. (If you want to write -// arbitrary binary data then you will need to use something like base64 since -// TOML does not have any binary types.) -// -// When encoding TOML hashes (i.e., Go maps or structs), keys without any -// sub-hashes are encoded first. -// -// If a Go map is encoded, then its keys are sorted alphabetically for -// deterministic output. More control over this behavior may be provided if -// there is demand for it. -// -// Encoding Go values without a corresponding TOML representation---like map -// types with non-string keys---will cause an error to be returned. Similarly -// for mixed arrays/slices, arrays/slices with nil elements, embedded -// non-struct types and nested slices containing maps or structs. 
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK -// and so is []map[string][]string.) -func (enc *Encoder) Encode(v interface{}) error { - rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { - return err - } - return enc.w.Flush() -} - -func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { - defer func() { - if r := recover(); r != nil { - if terr, ok := r.(tomlEncodeError); ok { - err = terr.error - return - } - panic(r) - } - }() - enc.encode(key, rv) - return nil -} - -func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case. Time needs to be in ISO8601 format. - // Special case. If we can marshal the type to text, then we used that. - // Basically, this prevents the encoder for handling these types as - // generic structs (or whatever the underlying type of a TextMarshaler is). - switch rv.Interface().(type) { - case time.Time, TextMarshaler: - enc.keyEqElement(key, rv) - return - } - - k := rv.Kind() - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, - reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.keyEqElement(key, rv) - case reflect.Array, reflect.Slice: - if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { - enc.eArrayOfTables(key, rv) - } else { - enc.keyEqElement(key, rv) - } - case reflect.Interface: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Map: - if rv.IsNil() { - return - } - enc.eTable(key, rv) - case reflect.Ptr: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Struct: - enc.eTable(key, rv) - default: - panic(e("Unsupported type for key '%s': %s", key, k)) - } -} - -// eElement encodes any value that can be an array element (primitives and -// arrays). -func (enc *Encoder) eElement(rv reflect.Value) { - switch v := rv.Interface().(type) { - case time.Time: - // Special case time.Time as a primitive. Has to come before - // TextMarshaler below because time.Time implements - // encoding.TextMarshaler, but we need to always use UTC. - enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z")) - return - case TextMarshaler: - // Special case. Use text marshaler if it's available for this value. - if s, err := v.MarshalText(); err != nil { - encPanic(err) - } else { - enc.writeQuoted(string(s)) - } - return - } - switch rv.Kind() { - case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) - case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) - case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) - case reflect.Array, reflect.Slice: - enc.eArrayOrSliceElement(rv) - case reflect.Interface: - enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) - default: - panic(e("Unexpected primitive type: %s", rv.Kind())) - } -} - -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. 
-func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" - } - return fstr -} - -func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) -} - -func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { - length := rv.Len() - enc.wf("[") - for i := 0; i < length; i++ { - elem := rv.Index(i) - enc.eElement(elem) - if i != length-1 { - enc.wf(", ") - } - } - enc.wf("]") -} - -func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - panicIfInvalidKey(key, true) - for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) - if isNil(trv) { - continue - } - enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.String()) - enc.newline() - enc.eMapOrStruct(key, trv) - } -} - -func (enc *Encoder) eTable(key Key, rv reflect.Value) { - if len(key) == 1 { - // Output an extra new line between top-level tables. - // (The newline isn't written if nothing else has been written though.) - enc.newline() - } - if len(key) > 0 { - panicIfInvalidKey(key, true) - enc.wf("%s[%s]", enc.indentStr(key), key.String()) - enc.newline() - } - enc.eMapOrStruct(key, rv) -} - -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { - switch rv := eindirect(rv); rv.Kind() { - case reflect.Map: - enc.eMap(key, rv) - case reflect.Struct: - enc.eStruct(key, rv) - default: - panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) - } -} - -func (enc *Encoder) eMap(key Key, rv reflect.Value) { - rt := rv.Type() - if rt.Key().Kind() != reflect.String { - encPanic(errNonString) - } - - // Sort keys so that we have deterministic output. And write keys directly - // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string - for _, mapKey := range rv.MapKeys() { - k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { - mapKeysSub = append(mapKeysSub, k) - } else { - mapKeysDirect = append(mapKeysDirect, k) - } - } - - var writeMapKeys = func(mapKeys []string) { - sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. - continue - } - enc.encode(key.add(mapKey), mrv) - } - } - writeMapKeys(mapKeysDirect) - writeMapKeys(mapKeysSub) -} - -func (enc *Encoder) eStruct(key Key, rv reflect.Value) { - // Write keys for fields directly under this key first, because if we write - // a field that creates a new table, then all keys under it will be in that - // table (not the one we're writing here). - rt := rv.Type() - var fieldsDirect, fieldsSub [][]int - var addFields func(rt reflect.Type, rv reflect.Value, start []int) - addFields = func(rt reflect.Type, rv reflect.Value, start []int) { - for i := 0; i < rt.NumField(); i++ { - f := rt.Field(i) - // skip unexporded fields - if f.PkgPath != "" { - continue - } - frv := rv.Field(i) - if f.Anonymous { - frv := eindirect(frv) - t := frv.Type() - if t.Kind() != reflect.Struct { - encPanic(errAnonNonStruct) - } - addFields(t, frv, f.Index) - } else if typeIsHash(tomlTypeOfGo(frv)) { - fieldsSub = append(fieldsSub, append(start, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } - } - } - addFields(rt, rv, nil) - - var writeFields = func(fields [][]int) { - for _, fieldIndex := range fields { - sft := rt.FieldByIndex(fieldIndex) - sf := rv.FieldByIndex(fieldIndex) - if isNil(sf) { - // Don't write anything for nil fields. 
- continue - } - - keyName := sft.Tag.Get("toml") - if keyName == "-" { - continue - } - if keyName == "" { - keyName = sft.Name - } - enc.encode(key.add(keyName), sf) - } - } - writeFields(fieldsDirect) - writeFields(fieldsSub) -} - -// tomlTypeName returns the TOML type name of the Go value's type. It is used to -// determine whether the types of array elements are mixed (which is forbidden). -// If the Go value is nil, then it is illegal for it to be an array element, and -// valueIsNil is returned as true. - -// Returns the TOML type of a Go value. The type may be `nil`, which means -// no concrete TOML type could be found. -func tomlTypeOfGo(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() { - return nil - } - switch rv.Kind() { - case reflect.Bool: - return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64: - return tomlInteger - case reflect.Float32, reflect.Float64: - return tomlFloat - case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { - return tomlArrayHash - } else { - return tomlArray - } - case reflect.Ptr, reflect.Interface: - return tomlTypeOfGo(rv.Elem()) - case reflect.String: - return tomlString - case reflect.Map: - return tomlHash - case reflect.Struct: - switch rv.Interface().(type) { - case time.Time: - return tomlDatetime - case TextMarshaler: - return tomlString - default: - return tomlHash - } - default: - panic("unexpected reflect.Kind: " + rv.Kind().String()) - } -} - -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). -func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) - } - - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - elem := rv.Index(i) - switch elemType := tomlTypeOfGo(elem); { - case elemType == nil: - encPanic(errArrayNilElement) - case !typeEqual(firstType, elemType): - encPanic(errArrayMixedElementTypes) - } - } - // If we have a nested array, then we must make sure that the nested - // array contains ONLY primitives. - // This checks arbitrarily nested arrays. 
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { - nest := tomlArrayType(eindirect(rv.Index(0))) - if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { - encPanic(errArrayNoTable) - } - } - return firstType -} - -func (enc *Encoder) newline() { - if enc.hasWritten { - enc.wf("\n") - } -} - -func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - panicIfInvalidKey(key, false) - enc.wf("%s%s = ", enc.indentStr(key), key[len(key)-1]) - enc.eElement(val) - enc.newline() -} - -func (enc *Encoder) wf(format string, v ...interface{}) { - if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { - encPanic(err) - } - enc.hasWritten = true -} - -func (enc *Encoder) indentStr(key Key) string { - return strings.Repeat(enc.Indent, len(key)-1) -} - -func encPanic(err error) { - panic(tomlEncodeError{err}) -} - -func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: - return v - } -} - -func isNil(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return rv.IsNil() - default: - return false - } -} - -func panicIfInvalidKey(key Key, hash bool) { - if hash { - for _, k := range key { - if !isValidTableName(k) { - encPanic(e("Key '%s' is not a valid table name. Table names "+ - "cannot contain '[', ']' or '.'.", key.String())) - } - } - } else { - if !isValidKeyName(key[len(key)-1]) { - encPanic(e("Key '%s' is not a name. Key names "+ - "cannot contain whitespace.", key.String())) - } - } -} - -func isValidTableName(s string) bool { - if len(s) == 0 { - return false - } - for _, r := range s { - if r == '[' || r == ']' || r == '.' { - return false - } - } - return true -} - -func isValidKeyName(s string) bool { - if len(s) == 0 { - return false - } - for _, r := range s { - if unicode.IsSpace(r) { - return false - } - } - return true -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go deleted file mode 100644 index 74a5ee5d2..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/encode_test.go +++ /dev/null @@ -1,506 +0,0 @@ -package toml - -import ( - "bytes" - "fmt" - "log" - "net" - "testing" - "time" -) - -func TestEncodeRoundTrip(t *testing.T) { - type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time - Ipaddress net.IP - } - - var inputs = Config{ - 13, - []string{"one", "two", "three"}, - 3.145, - []int{11, 2, 3, 4}, - time.Now(), - net.ParseIP("192.168.59.254"), - } - - var firstBuffer bytes.Buffer - e := NewEncoder(&firstBuffer) - err := e.Encode(inputs) - if err != nil { - t.Fatal(err) - } - var outputs Config - if _, err := Decode(firstBuffer.String(), &outputs); err != nil { - log.Printf("Could not decode:\n-----\n%s\n-----\n", - firstBuffer.String()) - t.Fatal(err) - } - - // could test each value individually, but I'm lazy - var secondBuffer bytes.Buffer - e2 := NewEncoder(&secondBuffer) - err = e2.Encode(outputs) - if err != nil { - t.Fatal(err) - } - if firstBuffer.String() != secondBuffer.String() { - t.Error( - firstBuffer.String(), - "\n\n is not identical to\n\n", - secondBuffer.String()) - } -} - -// XXX(burntsushi) -// I think these tests probably should be removed. They are good, but they -// ought to be obsolete by toml-test. 
-func TestEncode(t *testing.T) { - type Embedded struct { - Int int `toml:"_int"` - } - type NonStruct int - - date := time.Date(2014, 5, 11, 20, 30, 40, 0, time.FixedZone("IST", 3600)) - dateStr := "2014-05-11T19:30:40Z" - - tests := map[string]struct { - input interface{} - wantOutput string - wantError error - }{ - "bool field": { - input: struct { - BoolTrue bool - BoolFalse bool - }{true, false}, - wantOutput: "BoolTrue = true\nBoolFalse = false\n", - }, - "int fields": { - input: struct { - Int int - Int8 int8 - Int16 int16 - Int32 int32 - Int64 int64 - }{1, 2, 3, 4, 5}, - wantOutput: "Int = 1\nInt8 = 2\nInt16 = 3\nInt32 = 4\nInt64 = 5\n", - }, - "uint fields": { - input: struct { - Uint uint - Uint8 uint8 - Uint16 uint16 - Uint32 uint32 - Uint64 uint64 - }{1, 2, 3, 4, 5}, - wantOutput: "Uint = 1\nUint8 = 2\nUint16 = 3\nUint32 = 4" + - "\nUint64 = 5\n", - }, - "float fields": { - input: struct { - Float32 float32 - Float64 float64 - }{1.5, 2.5}, - wantOutput: "Float32 = 1.5\nFloat64 = 2.5\n", - }, - "string field": { - input: struct{ String string }{"foo"}, - wantOutput: "String = \"foo\"\n", - }, - "string field and unexported field": { - input: struct { - String string - unexported int - }{"foo", 0}, - wantOutput: "String = \"foo\"\n", - }, - "datetime field in UTC": { - input: struct{ Date time.Time }{date}, - wantOutput: fmt.Sprintf("Date = %s\n", dateStr), - }, - "datetime field as primitive": { - // Using a map here to fail if isStructOrMap() returns true for - // time.Time. - input: map[string]interface{}{ - "Date": date, - "Int": 1, - }, - wantOutput: fmt.Sprintf("Date = %s\nInt = 1\n", dateStr), - }, - "array fields": { - input: struct { - IntArray0 [0]int - IntArray3 [3]int - }{[0]int{}, [3]int{1, 2, 3}}, - wantOutput: "IntArray0 = []\nIntArray3 = [1, 2, 3]\n", - }, - "slice fields": { - input: struct{ IntSliceNil, IntSlice0, IntSlice3 []int }{ - nil, []int{}, []int{1, 2, 3}, - }, - wantOutput: "IntSlice0 = []\nIntSlice3 = [1, 2, 3]\n", - }, - "datetime slices": { - input: struct{ DatetimeSlice []time.Time }{ - []time.Time{date, date}, - }, - wantOutput: fmt.Sprintf("DatetimeSlice = [%s, %s]\n", - dateStr, dateStr), - }, - "nested arrays and slices": { - input: struct { - SliceOfArrays [][2]int - ArrayOfSlices [2][]int - SliceOfArraysOfSlices [][2][]int - ArrayOfSlicesOfArrays [2][][2]int - SliceOfMixedArrays [][2]interface{} - ArrayOfMixedSlices [2][]interface{} - }{ - [][2]int{{1, 2}, {3, 4}}, - [2][]int{{1, 2}, {3, 4}}, - [][2][]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [2][][2]int{ - { - {1, 2}, {3, 4}, - }, - { - {5, 6}, {7, 8}, - }, - }, - [][2]interface{}{ - {1, 2}, {"a", "b"}, - }, - [2][]interface{}{ - {1, 2}, {"a", "b"}, - }, - }, - wantOutput: `SliceOfArrays = [[1, 2], [3, 4]] -ArrayOfSlices = [[1, 2], [3, 4]] -SliceOfArraysOfSlices = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -ArrayOfSlicesOfArrays = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] -SliceOfMixedArrays = [[1, 2], ["a", "b"]] -ArrayOfMixedSlices = [[1, 2], ["a", "b"]] -`, - }, - "empty slice": { - input: struct{ Empty []interface{} }{[]interface{}{}}, - wantOutput: "Empty = []\n", - }, - "(error) slice with element type mismatch (string and integer)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, "a"}}, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with element type mismatch (integer and float)": { - input: struct{ Mixed []interface{} }{[]interface{}{1, 2.5}}, - wantError: errArrayMixedElementTypes, - }, - "slice with elems of differing Go types, same TOML types": 
{ - input: struct { - MixedInts []interface{} - MixedFloats []interface{} - }{ - []interface{}{ - int(1), int8(2), int16(3), int32(4), int64(5), - uint(1), uint8(2), uint16(3), uint32(4), uint64(5), - }, - []interface{}{float32(1.5), float64(2.5)}, - }, - wantOutput: "MixedInts = [1, 2, 3, 4, 5, 1, 2, 3, 4, 5]\n" + - "MixedFloats = [1.5, 2.5]\n", - }, - "(error) slice w/ element type mismatch (one is nested array)": { - input: struct{ Mixed []interface{} }{ - []interface{}{1, []interface{}{2}}, - }, - wantError: errArrayMixedElementTypes, - }, - "(error) slice with 1 nil element": { - input: struct{ NilElement1 []interface{} }{[]interface{}{nil}}, - wantError: errArrayNilElement, - }, - "(error) slice with 1 nil element (and other non-nil elements)": { - input: struct{ NilElement []interface{} }{ - []interface{}{1, nil}, - }, - wantError: errArrayNilElement, - }, - "simple map": { - input: map[string]int{"a": 1, "b": 2}, - wantOutput: "a = 1\nb = 2\n", - }, - "map with interface{} value type": { - input: map[string]interface{}{"a": 1, "b": "c"}, - wantOutput: "a = 1\nb = \"c\"\n", - }, - "map with interface{} value type, some of which are structs": { - input: map[string]interface{}{ - "a": struct{ Int int }{2}, - "b": 1, - }, - wantOutput: "b = 1\n\n[a]\n Int = 2\n", - }, - "nested map": { - input: map[string]map[string]int{ - "a": {"b": 1}, - "c": {"d": 2}, - }, - wantOutput: "[a]\n b = 1\n\n[c]\n d = 2\n", - }, - "nested struct": { - input: struct{ Struct struct{ Int int } }{ - struct{ Int int }{1}, - }, - wantOutput: "[Struct]\n Int = 1\n", - }, - "nested struct and non-struct field": { - input: struct { - Struct struct{ Int int } - Bool bool - }{struct{ Int int }{1}, true}, - wantOutput: "Bool = true\n\n[Struct]\n Int = 1\n", - }, - "2 nested structs": { - input: struct{ Struct1, Struct2 struct{ Int int } }{ - struct{ Int int }{1}, struct{ Int int }{2}, - }, - wantOutput: "[Struct1]\n Int = 1\n\n[Struct2]\n Int = 2\n", - }, - "deeply nested structs": { - input: struct { - Struct1, Struct2 struct{ Struct3 *struct{ Int int } } - }{ - struct{ Struct3 *struct{ Int int } }{&struct{ Int int }{1}}, - struct{ Struct3 *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct1]\n [Struct1.Struct3]\n Int = 1" + - "\n\n[Struct2]\n", - }, - "nested struct with nil struct elem": { - input: struct { - Struct struct{ Inner *struct{ Int int } } - }{ - struct{ Inner *struct{ Int int } }{nil}, - }, - wantOutput: "[Struct]\n", - }, - "nested struct with no fields": { - input: struct { - Struct struct{ Inner struct{} } - }{ - struct{ Inner struct{} }{struct{}{}}, - }, - wantOutput: "[Struct]\n [Struct.Inner]\n", - }, - "struct with tags": { - input: struct { - Struct struct { - Int int `toml:"_int"` - } `toml:"_struct"` - Bool bool `toml:"_bool"` - }{ - struct { - Int int `toml:"_int"` - }{1}, true, - }, - wantOutput: "_bool = true\n\n[_struct]\n _int = 1\n", - }, - "embedded struct": { - input: struct{ Embedded }{Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "embedded *struct": { - input: struct{ *Embedded }{&Embedded{1}}, - wantOutput: "_int = 1\n", - }, - "nested embedded struct": { - input: struct { - Struct struct{ Embedded } `toml:"_struct"` - }{struct{ Embedded }{Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "nested embedded *struct": { - input: struct { - Struct struct{ *Embedded } `toml:"_struct"` - }{struct{ *Embedded }{&Embedded{1}}}, - wantOutput: "[_struct]\n _int = 1\n", - }, - "array of tables": { - input: struct { - Structs []*struct{ Int int } `toml:"struct"` - }{ - 
[]*struct{ Int int }{{1}, {3}}, - }, - wantOutput: "[[struct]]\n Int = 1\n\n[[struct]]\n Int = 3\n", - }, - "array of tables order": { - input: map[string]interface{}{ - "map": map[string]interface{}{ - "zero": 5, - "arr": []map[string]int{ - map[string]int{ - "friend": 5, - }, - }, - }, - }, - wantOutput: "[map]\n zero = 5\n\n [[map.arr]]\n friend = 5\n", - }, - "(error) top-level slice": { - input: []struct{ Int int }{{1}, {2}, {3}}, - wantError: errNoKey, - }, - "(error) slice of slice": { - input: struct { - Slices [][]struct{ Int int } - }{ - [][]struct{ Int int }{{{1}}, {{2}}, {{3}}}, - }, - wantError: errArrayNoTable, - }, - "(error) map no string key": { - input: map[int]string{1: ""}, - wantError: errNonString, - }, - "(error) anonymous non-struct": { - input: struct{ NonStruct }{5}, - wantError: errAnonNonStruct, - }, - "(error) empty key name": { - input: map[string]int{"": 1}, - wantError: errAnything, - }, - "(error) empty map name": { - input: map[string]interface{}{ - "": map[string]int{"v": 1}, - }, - wantError: errAnything, - }, - } - for label, test := range tests { - encodeExpected(t, label, test.input, test.wantOutput, test.wantError) - } -} - -func TestEncodeNestedTableArrays(t *testing.T) { - type song struct { - Name string `toml:"name"` - } - type album struct { - Name string `toml:"name"` - Songs []song `toml:"songs"` - } - type springsteen struct { - Albums []album `toml:"albums"` - } - value := springsteen{ - []album{ - {"Born to Run", - []song{{"Jungleland"}, {"Meeting Across the River"}}}, - {"Born in the USA", - []song{{"Glory Days"}, {"Dancing in the Dark"}}}, - }, - } - expected := `[[albums]] - name = "Born to Run" - - [[albums.songs]] - name = "Jungleland" - - [[albums.songs]] - name = "Meeting Across the River" - -[[albums]] - name = "Born in the USA" - - [[albums.songs]] - name = "Glory Days" - - [[albums.songs]] - name = "Dancing in the Dark" -` - encodeExpected(t, "nested table arrays", value, expected, nil) -} - -func TestEncodeArrayHashWithNormalHashOrder(t *testing.T) { - type Alpha struct { - V int - } - type Beta struct { - V int - } - type Conf struct { - V int - A Alpha - B []Beta - } - - val := Conf{ - V: 1, - A: Alpha{2}, - B: []Beta{{3}}, - } - expected := "V = 1\n\n[A]\n V = 2\n\n[[B]]\n V = 3\n" - encodeExpected(t, "array hash with normal hash order", val, expected, nil) -} - -func encodeExpected( - t *testing.T, label string, val interface{}, wantStr string, wantErr error, -) { - var buf bytes.Buffer - enc := NewEncoder(&buf) - err := enc.Encode(val) - if err != wantErr { - if wantErr != nil { - if wantErr == errAnything && err != nil { - return - } - t.Errorf("%s: want Encode error %v, got %v", label, wantErr, err) - } else { - t.Errorf("%s: Encode failed: %s", label, err) - } - } - if err != nil { - return - } - if got := buf.String(); wantStr != got { - t.Errorf("%s: want\n-----\n%q\n-----\nbut got\n-----\n%q\n-----\n", - label, wantStr, got) - } -} - -func ExampleEncoder_Encode() { - date, _ := time.Parse(time.RFC822, "14 Mar 10 18:00 UTC") - var config = map[string]interface{}{ - "date": date, - "counts": []int{1, 1, 2, 3, 5, 8}, - "hash": map[string]string{ - "key1": "val1", - "key2": "val2", - }, - } - buf := new(bytes.Buffer) - if err := NewEncoder(buf).Encode(config); err != nil { - log.Fatal(err) - } - fmt.Println(buf.String()) - - // Output: - // counts = [1, 1, 2, 3, 5, 8] - // date = 2010-03-14T18:00:00Z - // - // [hash] - // key1 = "val1" - // key2 = "val2" -} diff --git 
a/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index 140c44c11..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextUnmarshaler encoding.TextUnmarshaler diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index fb285e7f5..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go deleted file mode 100644 index 01fcb491b..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/lex.go +++ /dev/null @@ -1,725 +0,0 @@ -package toml - -import ( - "fmt" - "unicode/utf8" -) - -type itemType int - -const ( - itemError itemType = iota - itemNIL // used in the parser to indicate no type - itemEOF - itemText - itemString - itemBool - itemInteger - itemFloat - itemDatetime - itemArray // the start of an array - itemArrayEnd - itemTableStart - itemTableEnd - itemArrayTableStart - itemArrayTableEnd - itemKeyStart - itemCommentStart -) - -const ( - eof = 0 - tableStart = '[' - tableEnd = ']' - arrayTableStart = '[' - arrayTableEnd = ']' - tableSep = '.' - keySep = '=' - arrayStart = '[' - arrayEnd = ']' - arrayValTerm = ',' - commentStart = '#' - stringStart = '"' - stringEnd = '"' -) - -type stateFn func(lx *lexer) stateFn - -type lexer struct { - input string - start int - pos int - width int - line int - state stateFn - items chan item - - // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. 
- stack []stateFn -} - -type item struct { - typ itemType - val string - line int -} - -func (lx *lexer) nextItem() item { - for { - select { - case item := <-lx.items: - return item - default: - lx.state = lx.state(lx) - } - } -} - -func lex(input string) *lexer { - lx := &lexer{ - input: input + "\n", - state: lexTop, - line: 1, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - } - return lx -} - -func (lx *lexer) push(state stateFn) { - lx.stack = append(lx.stack, state) -} - -func (lx *lexer) pop() stateFn { - if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop.") - } - last := lx.stack[len(lx.stack)-1] - lx.stack = lx.stack[0 : len(lx.stack)-1] - return last -} - -func (lx *lexer) current() string { - return lx.input[lx.start:lx.pos] -} - -func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) next() (r rune) { - if lx.pos >= len(lx.input) { - lx.width = 0 - return eof - } - - if lx.input[lx.pos] == '\n' { - lx.line++ - } - r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) - lx.pos += lx.width - return r -} - -// ignore skips over the pending input before this point. -func (lx *lexer) ignore() { - lx.start = lx.pos -} - -// backup steps back one rune. Can be called only once per call of next. -func (lx *lexer) backup() { - lx.pos -= lx.width - if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { - lx.line-- - } -} - -// accept consumes the next rune if it's equal to `valid`. -func (lx *lexer) accept(valid rune) bool { - if lx.next() == valid { - return true - } - lx.backup() - return false -} - -// peek returns but does not consume the next rune in the input. -func (lx *lexer) peek() rune { - r := lx.next() - lx.backup() - return r -} - -// errorf stops all lexing by emitting an error and returning `nil`. -// Note that any value that is a character is escaped if it's a special -// character (new lines, tabs, etc.). -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, - } - return nil -} - -// lexTop consumes elements at the top level of TOML data. -func lexTop(lx *lexer) stateFn { - r := lx.next() - if isWhitespace(r) || isNL(r) { - return lexSkip(lx, lexTop) - } - - switch r { - case commentStart: - lx.push(lexTop) - return lexCommentStart - case tableStart: - return lexTableStart - case eof: - if lx.pos > lx.start { - return lx.errorf("Unexpected EOF.") - } - lx.emit(itemEOF) - return nil - } - - // At this point, the only valid item can be a key, so we back up - // and let the key lexer do the rest. - lx.backup() - lx.push(lexTopEnd) - return lexKeyStart -} - -// lexTopEnd is entered whenever a top-level item has been consumed. (A value -// or a table.) It must see only whitespace, and will turn back to lexTop -// upon a new line. If it sees EOF, it will quit the lexer successfully. -func lexTopEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case r == commentStart: - // a comment will read to a new line for us. - lx.push(lexTop) - return lexCommentStart - case isWhitespace(r): - return lexTopEnd - case isNL(r): - lx.ignore() - return lexTop - case r == eof: - lx.ignore() - return lexTop - } - return lx.errorf("Expected a top-level item to end with a new line, "+ - "comment or EOF, but got %q instead.", r) -} - -// lexTable lexes the beginning of a table. Namely, it makes sure that -// it starts with a character other than '.' and ']'. 
-// It assumes that '[' has already been consumed. -// It also handles the case that this is an item in an array of tables. -// e.g., '[[name]]'. -func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { - lx.next() - lx.emit(itemArrayTableStart) - lx.push(lexArrayTableEnd) - } else { - lx.emit(itemTableStart) - lx.push(lexTableEnd) - } - return lexTableNameStart -} - -func lexTableEnd(lx *lexer) stateFn { - lx.emit(itemTableEnd) - return lexTopEnd -} - -func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("Expected end of table array name delimiter %q, "+ - "but got %q instead.", arrayTableEnd, r) - } - lx.emit(itemArrayTableEnd) - return lexTopEnd -} - -func lexTableNameStart(lx *lexer) stateFn { - switch lx.next() { - case tableEnd: - return lx.errorf("Unexpected end of table. (Tables cannot " + - "be empty.)") - case tableSep: - return lx.errorf("Unexpected table separator. (Tables cannot " + - "be empty.)") - } - return lexTableName -} - -// lexTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. -func lexTableName(lx *lexer) stateFn { - switch lx.peek() { - case tableStart: - return lx.errorf("Table names cannot contain %q or %q.", - tableStart, tableEnd) - case tableEnd: - lx.emit(itemText) - lx.next() - return lx.pop() - case tableSep: - lx.emit(itemText) - lx.next() - lx.ignore() - return lexTableNameStart - } - lx.next() - return lexTableName -} - -// lexKeyStart consumes a key name up until the first non-whitespace character. -// lexKeyStart will ignore whitespace. -func lexKeyStart(lx *lexer) stateFn { - r := lx.peek() - switch { - case r == keySep: - return lx.errorf("Unexpected key separator %q.", keySep) - case isWhitespace(r) || isNL(r): - lx.next() - return lexSkip(lx, lexKeyStart) - } - - lx.ignore() - lx.emit(itemKeyStart) - lx.next() - return lexKey -} - -// lexKey consumes the text of a key. Assumes that the first character (which -// is not whitespace) has already been consumed. -func lexKey(lx *lexer) stateFn { - r := lx.peek() - - // XXX: Possible divergence from spec? - // "Keys start with the first non-whitespace character and end with the - // last non-whitespace character before the equals sign." - // Note here that whitespace is either a tab or a space. - // But we'll call it quits if we see a new line too. - if isWhitespace(r) || isNL(r) { - lx.emit(itemText) - return lexKeyEnd - } - - // Let's also call it quits if we see an equals sign. - if r == keySep { - lx.emit(itemText) - return lexKeyEnd - } - - lx.next() - return lexKey -} - -// lexKeyEnd consumes the end of a key (up to the key separator). -// Assumes that the first whitespace character after a key (or the '=' -// separator) has NOT been consumed. -func lexKeyEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexKeyEnd) - case r == keySep: - return lexSkip(lx, lexValue) - } - return lx.errorf("Expected key separator %q, but got %q instead.", - keySep, r) -} - -// lexValue starts the consumption of a value anywhere a value is expected. -// lexValue will ignore whitespace. -// After a value is lexed, the last state on the next is popped and returned. -func lexValue(lx *lexer) stateFn { - // We allow whitespace to precede a value, but NOT new lines. - // In array syntax, the array states are responsible for ignoring new lines. 
- r := lx.next() - if isWhitespace(r) { - return lexSkip(lx, lexValue) - } - - switch { - case r == arrayStart: - lx.ignore() - lx.emit(itemArray) - return lexArrayValue - case r == stringStart: - lx.ignore() // ignore the '"' - return lexString - case r == 't': - return lexTrue - case r == 'f': - return lexFalse - case r == '-': - return lexNumberStart - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - case r == '.': // special error case, be kind to users - return lx.errorf("Floats must start with a digit, not '.'.") - } - return lx.errorf("Expected value but found %q instead.", r) -} - -// lexArrayValue consumes one value in an array. It assumes that '[' or ',' -// have already been consumed. All whitespace and new lines are ignored. -func lexArrayValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValue) - case r == commentStart: - lx.push(lexArrayValue) - return lexCommentStart - case r == arrayValTerm: - return lx.errorf("Unexpected array value terminator %q.", - arrayValTerm) - case r == arrayEnd: - return lexArrayEnd - } - - lx.backup() - lx.push(lexArrayValueEnd) - return lexValue -} - -// lexArrayValueEnd consumes the cruft between values of an array. Namely, -// it ignores whitespace and expects either a ',' or a ']'. -func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: - lx.push(lexArrayValueEnd) - return lexCommentStart - case r == arrayValTerm: - lx.ignore() - return lexArrayValue // move on to the next value - case r == arrayEnd: - return lexArrayEnd - } - return lx.errorf("Expected an array value terminator %q or an array "+ - "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) -} - -// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has -// just been consumed. -func lexArrayEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemArrayEnd) - return lx.pop() -} - -// lexString consumes the inner contents of a string. It assumes that the -// beginning '"' has already been consumed and ignored. -func lexString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == '\\': - return lexStringEscape - case r == stringEnd: - lx.backup() - lx.emit(itemString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexString -} - -// lexStringEscape consumes an escaped character. It assumes that the preceding -// '\\' has already been consumed. -func lexStringEscape(lx *lexer) stateFn { - r := lx.next() - switch r { - case 'b': - fallthrough - case 't': - fallthrough - case 'n': - fallthrough - case 'f': - fallthrough - case 'r': - fallthrough - case '"': - fallthrough - case '/': - fallthrough - case '\\': - return lexString - case 'u': - return lexStringUnicode - } - return lx.errorf("Invalid escape character %q. Only the following "+ - "escape characters are allowed: "+ - "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, and \\uXXXX.", r) -} - -// lexStringBinary consumes two hexadecimal digits following '\x'. It assumes -// that the '\x' has already been consumed. 
-func lexStringUnicode(lx *lexer) stateFn { - var r rune - - for i := 0; i < 4; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected four hexadecimal digits after '\\x', "+ - "but got '%s' instead.", lx.current()) - } - } - return lexString -} - -// lexNumberOrDateStart consumes either a (positive) integer, float or datetime. -// It assumes that NO negative sign has been consumed. -func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) - } - } - return lexNumberOrDate -} - -// lexNumberOrDate consumes either a (positive) integer, float or datetime. -func lexNumberOrDate(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '-': - if lx.pos-lx.start != 5 { - return lx.errorf("All ISO8601 dates must be in full Zulu form.") - } - return lexDateAfterYear - case isDigit(r): - return lexNumberOrDate - case r == '.': - return lexFloatStart - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format. -// It assumes that "YYYY-" has already been consumed. -func lexDateAfterYear(lx *lexer) stateFn { - formats := []rune{ - // digits are '0'. - // everything else is direct equality. - '0', '0', '-', '0', '0', - 'T', - '0', '0', ':', '0', '0', ':', '0', '0', - 'Z', - } - for _, f := range formats { - r := lx.next() - if f == '0' { - if !isDigit(r) { - return lx.errorf("Expected digit in ISO8601 datetime, "+ - "but found %q instead.", r) - } - } else if f != r { - return lx.errorf("Expected %q in ISO8601 datetime, "+ - "but found %q instead.", f, r) - } - } - lx.emit(itemDatetime) - return lx.pop() -} - -// lexNumberStart consumes either an integer or a float. It assumes that a -// negative sign has already been read, but that *no* digits have been consumed. -// lexNumberStart will move to the appropriate integer or float states. -func lexNumberStart(lx *lexer) stateFn { - // we MUST see a digit. Even floats have to start with a digit. - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) - } - } - return lexNumber -} - -// lexNumber consumes an integer or a float after seeing the first digit. -func lexNumber(lx *lexer) stateFn { - r := lx.next() - switch { - case isDigit(r): - return lexNumber - case r == '.': - return lexFloatStart - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexFloatStart starts the consumption of digits of a float after a '.'. -// Namely, at least one digit is required. -func lexFloatStart(lx *lexer) stateFn { - r := lx.next() - if !isDigit(r) { - return lx.errorf("Floats must have a digit after the '.', but got "+ - "%q instead.", r) - } - return lexFloat -} - -// lexFloat consumes the digits of a float after a '.'. -// Assumes that one digit has been consumed after a '.' already. -func lexFloat(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexFloat - } - - lx.backup() - lx.emit(itemFloat) - return lx.pop() -} - -// lexConst consumes the s[1:] in s. It assumes that s[0] has already been -// consumed. 
-func lexConst(lx *lexer, s string) stateFn { - for i := range s[1:] { - if r := lx.next(); r != rune(s[i+1]) { - return lx.errorf("Expected %q, but found %q instead.", s[:i+1], - s[:i]+string(r)) - } - } - return nil -} - -// lexTrue consumes the "rue" in "true". It assumes that 't' has already -// been consumed. -func lexTrue(lx *lexer) stateFn { - if fn := lexConst(lx, "true"); fn != nil { - return fn - } - lx.emit(itemBool) - return lx.pop() -} - -// lexFalse consumes the "alse" in "false". It assumes that 'f' has already -// been consumed. -func lexFalse(lx *lexer) stateFn { - if fn := lexConst(lx, "false"); fn != nil { - return fn - } - lx.emit(itemBool) - return lx.pop() -} - -// lexCommentStart begins the lexing of a comment. It will emit -// itemCommentStart and consume no characters, passing control to lexComment. -func lexCommentStart(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemCommentStart) - return lexComment -} - -// lexComment lexes an entire comment. It assumes that '#' has been consumed. -// It will consume *up to* the first new line character, and pass control -// back to the last state on the stack. -func lexComment(lx *lexer) stateFn { - r := lx.peek() - if isNL(r) || r == eof { - lx.emit(itemText) - return lx.pop() - } - lx.next() - return lexComment -} - -// lexSkip ignores all slurped input and moves on to the next state. -func lexSkip(lx *lexer, nextState stateFn) stateFn { - return func(lx *lexer) stateFn { - lx.ignore() - return nextState - } -} - -// isWhitespace returns true if `r` is a whitespace character according -// to the spec. -func isWhitespace(r rune) bool { - return r == '\t' || r == ' ' -} - -func isNL(r rune) bool { - return r == '\n' || r == '\r' -} - -func isDigit(r rune) bool { - return r >= '0' && r <= '9' -} - -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} - -func (itype itemType) String() string { - switch itype { - case itemError: - return "Error" - case itemNIL: - return "NIL" - case itemEOF: - return "EOF" - case itemText: - return "Text" - case itemString: - return "String" - case itemBool: - return "Bool" - case itemInteger: - return "Integer" - case itemFloat: - return "Float" - case itemDatetime: - return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" - case itemCommentStart: - return "CommentStart" - } - panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) -} - -func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go deleted file mode 100644 index 43afe3c3f..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/parse.go +++ /dev/null @@ -1,417 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. - ordered []Key - - // the full key for the current hash in scope - context Key - - // the base key name for everything except hashes - currentKey string - - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. 
- implicits map[string]bool -} - -type parseError string - -func (pe parseError) Error() string { - return string(pe) -} - -func parse(data string) (p *parser, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { - return - } - panic(r) - } - }() - - p = &parser{ - mapping: make(map[string]interface{}), - types: make(map[string]tomlType), - lx: lex(data), - ordered: make([]Key, 0), - implicits: make(map[string]bool), - } - for { - item := p.next() - if item.typ == itemEOF { - break - } - p.topLevel(item) - } - - return p, nil -} - -func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d, key '%s': %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) -} - -func (p *parser) next() item { - it := p.lx.nextItem() - if it.typ == itemError { - p.panicf("Near line %d: %s", it.line, it.val) - } - return it -} - -func (p *parser) bug(format string, v ...interface{}) { - log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...)) -} - -func (p *parser) expect(typ itemType) item { - it := p.next() - p.assertEqual(typ, it.typ) - return it -} - -func (p *parser) assertEqual(expected, got itemType) { - if expected != got { - p.bug("Expected '%s' but got '%s'.", expected, got) - } -} - -func (p *parser) topLevel(item item) { - switch item.typ { - case itemCommentStart: - p.approxLine = item.line - p.expect(itemText) - case itemTableStart: - kg := p.expect(itemText) - p.approxLine = kg.line - - key := make(Key, 0) - for ; kg.typ == itemText; kg = p.next() { - key = append(key, kg.val) - } - p.assertEqual(itemTableEnd, kg.typ) - - p.establishContext(key, false) - p.setType("", tomlHash) - p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.expect(itemText) - p.approxLine = kg.line - - key := make(Key, 0) - for ; kg.typ == itemText; kg = p.next() { - key = append(key, kg.val) - } - p.assertEqual(itemArrayTableEnd, kg.typ) - - p.establishContext(key, true) - p.setType("", tomlArrayHash) - p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.expect(itemText) - p.currentKey = kname.val - p.approxLine = kname.line - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - - p.currentKey = "" - default: - p.bug("Unexpected type at top level: %s", item.typ) - } -} - -// value translates an expected value from the lexer into a Go value wrapped -// as an empty interface. -func (p *parser) value(it item) (interface{}, tomlType) { - switch it.typ { - case itemString: - return p.replaceUnicode(replaceEscapes(it.val)), p.typeOfPrimitive(it) - case itemBool: - switch it.val { - case "true": - return true, p.typeOfPrimitive(it) - case "false": - return false, p.typeOfPrimitive(it) - } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - num, err := strconv.ParseInt(it.val, 10, 64) - if err != nil { - // See comment below for floats describing why we make a - // distinction between a bug and a user error. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemFloat: - num, err := strconv.ParseFloat(it.val, 64) - if err != nil { - // Distinguish float values. 
Normally, it'd be a bug if the lexer - // provides an invalid float, but it's possible that the float is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - // - // This is also true for integers. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.bug("Expected float value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemDatetime: - t, err := time.Parse("2006-01-02T15:04:05Z", it.val) - if err != nil { - p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val) - } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) - - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) - } - return array, p.typeOfArray(types) - } - p.bug("Unexpected value type: %s", it.typ) - panic("unreachable") -} - -// establishContext sets the current context of the parser, -// where the context is either a hash or an array of hashes. Which one is -// set depends on the value of the `array` parameter. -// -// Establishing the context also makes sure that the key isn't a duplicate, and -// will create implicit hashes automatically. -func (p *parser) establishContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. - hashContext := p.mapping - keyContext := make(Key, 0) - - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] - keyContext = append(keyContext, k) - - // No key? Make an implicit hash and move on. - if !ok { - p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) - } - - // If the hash context is actually an array of tables, then set - // the hash context to the last element in that array. - // - // Otherwise, it better be a table, since this MUST be a key group (by - // virtue of it not being the last element in a key). - switch t := hashContext[k].(type) { - case []map[string]interface{}: - hashContext = t[len(t)-1] - case map[string]interface{}: - hashContext = t - default: - p.panicf("Key '%s' was already created as a hash.", keyContext) - } - } - - p.context = keyContext - if array { - // If this is the first element for this array, then allocate a new - // list of tables for it. - k := key[len(key)-1] - if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 5) - } - - // Add a new table. But make sure the key hasn't already been used - // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) - } else { - p.panicf("Key '%s' was already created and cannot be used as "+ - "an array.", keyContext) - } - } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) - } - p.context = append(p.context, key[len(key)-1]) -} - -// setValue sets the given key to the given value in the current context. -// It will make sure that the key hasn't already been defined, account for -// implicit key groups. 
-func (p *parser) setValue(key string, value interface{}) { - var tmpHash interface{} - var ok bool - - hash := p.mapping - keyContext := make(Key, 0) - for _, k := range p.context { - keyContext = append(keyContext, k) - if tmpHash, ok = hash[k]; !ok { - p.bug("Context for key '%s' has not been established.", keyContext) - } - switch t := tmpHash.(type) { - case []map[string]interface{}: - // The context is a table of hashes. Pick the most recent table - // defined as the current hash. - hash = t[len(t)-1] - case map[string]interface{}: - hash = t - default: - p.bug("Expected hash to have type 'map[string]interface{}', but "+ - "it has '%T' instead.", tmpHash) - } - } - keyContext = append(keyContext, key) - - if _, ok := hash[key]; ok { - // Typically, if the given key has already been set, then we have - // to raise an error since duplicate keys are disallowed. However, - // it's possible that a key was previously defined implicitly. In this - // case, it is allowed to be redefined concretely. (See the - // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) - // - // But we have to make sure to stop marking it as an implicit. (So that - // another redefinition provokes an error.) - // - // Note that since it has already been defined (as a hash), we don't - // want to overwrite it. So our business is done. - if p.isImplicit(keyContext) { - p.removeImplicit(keyContext) - return - } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. - p.panicf("Key '%s' has already been defined.", keyContext) - } - hash[key] = value -} - -// setType sets the type of a particular value at a given key. -// It should be called immediately AFTER setValue. -// -// Note that if `key` is empty, then the type given will be applied to the -// current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType) { - keyContext := make(Key, 0, len(p.context)+1) - for _, k := range p.context { - keyContext = append(keyContext, k) - } - if len(key) > 0 { // allow type setting for hashes - keyContext = append(keyContext, key) - } - p.types[keyContext.String()] = typ -} - -// addImplicit sets the given Key as having been created implicitly. -func (p *parser) addImplicit(key Key) { - p.implicits[key.String()] = true -} - -// removeImplicit stops tagging the given key as having been implicitly created. -func (p *parser) removeImplicit(key Key) { - p.implicits[key.String()] = false -} - -// isImplicit returns true if the key group pointed to by the key was created -// implicitly. -func (p *parser) isImplicit(key Key) bool { - return p.implicits[key.String()] -} - -// current returns the full key name of the current context. 
-func (p *parser) current() string { - if len(p.currentKey) == 0 { - return p.context.String() - } - if len(p.context) == 0 { - return p.currentKey - } - return fmt.Sprintf("%s.%s", p.context, p.currentKey) -} - -func replaceEscapes(s string) string { - return strings.NewReplacer( - "\\b", "\u0008", - "\\t", "\u0009", - "\\n", "\u000A", - "\\f", "\u000C", - "\\r", "\u000D", - "\\\"", "\u0022", - "\\/", "\u002F", - "\\\\", "\u005C", - ).Replace(s) -} - -func (p *parser) replaceUnicode(s string) string { - indexEsc := func() int { - return strings.Index(s, "\\u") - } - for i := indexEsc(); i != -1; i = indexEsc() { - asciiBytes := s[i+2 : i+6] - s = strings.Replace(s, s[i:i+6], p.asciiEscapeToUnicode(asciiBytes), -1) - } - return s -} - -func (p *parser) asciiEscapeToUnicode(s string) string { - hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) - if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the "+ - "lexer claims it's OK: %s", s, err) - } - - // BUG(burntsushi) - // I honestly don't understand how this works. I can't seem - // to find a way to make this fail. I figured this would fail on invalid - // UTF-8 characters like U+DCFF, but it doesn't. - r := string(rune(hex)) - if !utf8.ValidString(r) { - p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) - } - return string(r) -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim b/Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim deleted file mode 100644 index 562164be0..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go deleted file mode 100644 index 79dac6b19..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/type_check.go +++ /dev/null @@ -1,85 +0,0 @@ -package toml - -// tomlType represents any Go type that corresponds to a TOML type. -// While the first draft of the TOML spec has a simplistic type system that -// probably doesn't need this level of sophistication, we seem to be militating -// toward adding real composite types. -type tomlType interface { - typeString() string -} - -// typeEqual accepts any two types and returns true if they are equal. -func typeEqual(t1, t2 tomlType) bool { - if t1 == nil || t2 == nil { - return false - } - return t1.typeString() == t2.typeString() -} - -func typeIsHash(t tomlType) bool { - return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) -} - -type tomlBaseType string - -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} - -var ( - tomlInteger tomlBaseType = "Integer" - tomlFloat tomlBaseType = "Float" - tomlDatetime tomlBaseType = "Datetime" - tomlString tomlBaseType = "String" - tomlBool tomlBaseType = "Bool" - tomlArray tomlBaseType = "Array" - tomlHash tomlBaseType = "Hash" - tomlArrayHash tomlBaseType = "ArrayHash" -) - -// typeOfPrimitive returns a tomlType of any primitive value in TOML. -// Primitive values are: Integer, Float, Datetime, String and Bool. -// -// Passing a lexer item other than the following will cause a BUG message -// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. 
-func (p *parser) typeOfPrimitive(lexItem item) tomlType { - switch lexItem.typ { - case itemInteger: - return tomlInteger - case itemFloat: - return tomlFloat - case itemDatetime: - return tomlDatetime - case itemString: - return tomlString - case itemBool: - return tomlBool - } - p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) - panic("unreachable") -} - -// typeOfArray returns a tomlType for an array given a list of types of its -// values. -// -// In the current spec, if an array is homogeneous, then its type is always -// "Array". If the array is not homogeneous, an error is generated. -func (p *parser) typeOfArray(types []tomlType) tomlType { - // Empty arrays are cool. - if len(types) == 0 { - return tomlArray - } - - theType := types[0] - for _, t := range types[1:] { - if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but arrays "+ - "must be homogeneous.", theType, t) - } - } - return tomlArray -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go b/Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go deleted file mode 100644 index 7592f87a4..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/toml/type_fields.go +++ /dev/null @@ -1,241 +0,0 @@ -package toml - -// Struct field handling is adapted from code in encoding/json: -// -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the Go distribution. - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string // the name of the field (`toml` tag included) - tag bool // whether field has a `toml` tag - index []int // represents the depth of an anonymous field - typ reflect.Type // the type of the field -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from toml tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that TOML should recognize for the given -// type. The algorithm is breadth-first search over the set of structs to -// include - the top struct and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. 
- var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - name := sf.Tag.Get("toml") - if name == "-" { - continue - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. - if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - f := field{name: ft.Name(), index: index, typ: ft} - next = append(next, f) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with TOML tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// TOML tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. 
- if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. - f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/.gitignore b/Godeps/_workspace/src/github.com/BurntSushi/ty/.gitignore deleted file mode 100644 index f1860a0ef..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -TAGS -tags -.*.swp diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/COPYING b/Godeps/_workspace/src/github.com/BurntSushi/ty/COPYING deleted file mode 100644 index 5c93f4565..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/COPYING +++ /dev/null @@ -1,13 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/Makefile b/Godeps/_workspace/src/github.com/BurntSushi/ty/Makefile deleted file mode 100644 index 5a9bbe980..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/Makefile +++ /dev/null @@ -1,28 +0,0 @@ -all: install - -install: - go install ./... - -test: install - go test ./... - -benchcmp: install - cd fun \ - && echo "Running reflection benchmarks..." \ - && go test -cpu 12 -run NONE -benchmem -bench . > reflect.bench \ - && echo "Running built in benchmarks..." \ - && go test -cpu 12 -run NONE -benchmem -bench . -builtin > builtin.bench \ - && benchcmp builtin.bench reflect.bench > ../perf/cmp.bench \ - && rm builtin.bench reflect.bench - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/README.md b/Godeps/_workspace/src/github.com/BurntSushi/ty/README.md deleted file mode 100644 index 8e20278a8..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/README.md +++ /dev/null @@ -1,119 +0,0 @@ -Package `ty` provides utilities for writing type parametric functions with run -time type safety. - -This package contains two sub-packages `fun` and `data` which define some -potentially useful functions and abstractions using the type checker in -this package. - -## Requirements - -Go tip (or 1.1 when it's released) is required. This package will not work -with Go 1.0.x or earlier. - -The very foundation of this package only recently became possible with the -addition of 3 new functions in the standard library `reflect` package: -SliceOf, MapOf and ChanOf. In particular, it provides the ability to -dynamically construct types at run time from component types. 
- -Further extensions to this package can be made if similar functions are added -for structs and functions(?). - -## Installation - -```bash -go get github.com/BurntSushi/ty -go get github.com/BurntSushi/ty/fun -``` - -## Examples - -Squaring each integer in a slice: - -```go -square := func(x int) int { return x * x } -nums := []int{1, 2, 3, 4, 5} -squares := Map(square, nums).([]int) -``` - -Reversing any slice: - -```go -slice := []string{"a", "b", "c"} -reversed := Reverse(slice).([]string) -``` - -Sorting any slice: - -```go -// Sort a slice of structs with first class functions. -type Album struct { - Title string - Year int -} -albums := []Album{ - {"Born to Run", 1975}, - {"WIESS", 1973}, - {"Darkness", 1978}, - {"Greetings", 1973}, -} - -less := func(a, b Album) bool { return a.Year < b.Year }, -sorted := QuickSort(less, albums).([]Album) -``` - -Parallel map: - -```go -// Compute the prime factorization concurrently -// for every integer in [1000, 10000]. -primeFactors := func(n int) []int { // compute prime factors } -factors := ParMap(primeFactors, Range(1000, 10001)).([]int) -``` - -Asynchronous channel without a fixed size buffer: - -```go -s, r := AsyncChan(new(chan int)) -send, recv := s.(chan<- int), r.(<-chan int) - -// Send as much as you want. -for i := 0; i < 100; i++ { - s <- i -} -close(s) -for i := range recv { - // do something with `i` -} -``` - -Shuffle any slice in place: - -```go -jumbleMe := []string{"The", "quick", "brown", "fox"} -Shuffle(jumbleMe) -``` - -Function memoization: - -```go -// Memoizing a recursive function like `fibonacci`. -// Write it like normal: -var fib func(n int64) int64 -fib = func(n int64) int64 { - switch n { - case 0: - return 0 - case 1: - return 1 - } - return fib(n - 1) + fib(n - 2) -} - -// And wrap it with `Memo`. -fib = Memo(fib).(func(int64) int64) - -// Will keep your CPU busy for a long time -// without memoization. -fmt.Println(fib(80)) -``` - diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/data/doc.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/data/doc.go deleted file mode 100644 index b32939915..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/data/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Package data tumbles down the rabbit hole into parametric data types. - -A parametric data type in this package is a data type that is parameterized by -one or more types discovered at run time. For example, an ordered map is -parameterized by two types: the type of its keys and the type of its values. - -Implementation - -While all parametric inputs and outputs of each function have a Go type of -`interface{}`, the underlying type is maintained via reflection. In particular, -any operation that interacts with a parametric data type does so via -reflection so that the type safety found in Go at compile time can be -recovered at run time. - -For example, consider the case of an ordered map. One might define such a map -as a list of its keys in order and a map of `interface{}` to `interface{}`: - - type OrdMap struct { - M map[interface{}]interface{} - Keys []interface{} - } - -And one can interact with this map using standard built-in Go operations: - - // Add a key - M["key"] = "value" - Keys = append(Keys, "key") - - // Delete a key - delete(M, "key") - - // Read a key - M["key"] - -But there is no type safety with such a representation, even at run time: - - // Both of these operations are legal with - // the aforementioned representation. 
- M["key"] = "value" - M[5] = true - -Thus, the contribution of this library is to maintain type safety at run time -by guaranteeing that all operations are consistent with Go typing rules: - - type OrdMap struct { - M reflect.Value - Keys reflect.Value - } - -And one must interact with a map using `reflect`: - - key, val := "key", "value" - rkey := reflect.ValueOf(key) - rval := reflect.ValueOf(val) - - // Add a key - M.SetMapIndex(rkey, rval) - Keys = reflect.Append(Keys, rkey) - - // Delete a key - M.SetMapIndex(rkey, reflect.Value{}) - - // Read a key - M.MapIndex(rkey) - -Which guarantees, at run-time, that the following cannot happen: - - key2, val2 := 5, true - rkey2 := reflect.ValueOf(key2) - rval2 := reflect.ValueOf(val2) - - // One or the other operation will be disallowed, - // assuming `OrdMap` isn't instantiated with - // `interface{}` as the key and value type. - M.SetMapIndex(rkey, rval) - M.SetMapIndex(rkey2, rval2) - -The result is much more painful library code but only slightly more painful -client code. -*/ -package data diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/data/ordmap.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/data/ordmap.go deleted file mode 100644 index f3192de79..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/data/ordmap.go +++ /dev/null @@ -1,175 +0,0 @@ -package data - -import ( - "reflect" - - "github.com/BurntSushi/ty" -) - -// OrdMap has a parametric type `OrdMap` where `K` is the type -// of the map's keys and `V` is the type of the map's values. -type OrdMap struct { - m reflect.Value - keys reflect.Value - ktype, vtype reflect.Type -} - -// OrderedMap returns a new instance of OrdMap instantiated with the key -// and value types given. Namely, the types should be provided via nil -// pointers, e.g., to create a map from strings to integers: -// -// omap := OrderedMap(new(string), new(int)) -// -// An ordered map maintains the insertion order of all keys in the map. -// Namely, `(*OrdMap).Keys()` returns a slice of keys in the order -// they were inserted. The order of a key can *only* be changed if it is -// deleted and added again. -// -// All of the operations on an ordered map have the same time complexity as -// the built-in `map`, except for `Delete` which is O(n) in the number of -// keys. -func OrderedMap(ktype, vtype interface{}) *OrdMap { - // A giant hack to get `Check` to do all the type construction work for us. - chk := ty.Check( - new(func(*ty.A, *ty.B) (ty.A, ty.B, map[ty.A]ty.B, []ty.A)), - ktype, vtype) - tkey, tval := chk.Returns[0], chk.Returns[1] - tmap, tkeys := chk.Returns[2], chk.Returns[3] - - return &OrdMap{ - m: reflect.MakeMap(tmap), - keys: reflect.MakeSlice(tkeys, 0, 10), - ktype: tkey, - vtype: tval, - } -} - -// Exists has a parametric type: -// -// func (om *OrdMap) Exists(key K) bool -// -// Exists returns true if `key` is in the map `om`. -func (om *OrdMap) Exists(key interface{}) bool { - rkey := ty.AssertType(key, om.ktype) - return om.exists(rkey) -} - -func (om *OrdMap) exists(rkey reflect.Value) bool { - return om.m.MapIndex(rkey).IsValid() -} - -// Put has a parametric type: -// -// func (om *OrdMap) Put(key K, val V) -// -// Put adds or overwrites `key` into the map `om` with value `val`. -// If `key` already exists in the map, then its position in the ordering -// of the map is not changed. 
-func (om *OrdMap) Put(key, val interface{}) { - rkey := ty.AssertType(key, om.ktype) - rval := ty.AssertType(val, om.vtype) - if !om.exists(rkey) { - om.keys = reflect.Append(om.keys, rkey) - } - om.m.SetMapIndex(rkey, rval) -} - -// Get has a parametric type: -// -// func (om *OrdMap) Get(key K) V -// -// Get retrieves the value in the map `om` corresponding to `key`. If the -// value does not exist, then the zero value of type `V` is returned. -func (om *OrdMap) Get(key interface{}) interface{} { - rkey := ty.AssertType(key, om.ktype) - rval := om.m.MapIndex(rkey) - if !rval.IsValid() { - return om.zeroValue().Interface() - } - return rval.Interface() -} - -// TryGet has a parametric type: -// -// func (om *OrdMap) TryGet(key K) (V, bool) -// -// TryGet retrieves the value in the map `om` corresponding to `key` and -// reports whether the value exists in the map or not. If the value does -// not exist, then the zero value of `V` and `false` are returned. -func (om *OrdMap) TryGet(key interface{}) (interface{}, bool) { - rkey := ty.AssertType(key, om.ktype) - rval := om.m.MapIndex(rkey) - if !rval.IsValid() { - return om.zeroValue().Interface(), false - } - return rval.Interface(), true -} - -// Delete has a parametric type: -// -// func (om *OrdMap) Delete(key K) -// -// Delete removes `key` from the map `om`. -// -// N.B. Delete is O(n) in the number of keys. -func (om *OrdMap) Delete(key interface{}) { - rkey := ty.AssertType(key, om.ktype) - - // Avoid doing work if we don't need to. - if !om.exists(rkey) { - return - } - - keysLen := om.keys.Len() - for i := 0; i < keysLen; i++ { - if key == om.keys.Index(i).Interface() { - // om.keys = append(om.keys[:i], om.keys[i+1:]...) - om.keys = reflect.AppendSlice( - om.keys.Slice(0, i), om.keys.Slice(i+1, keysLen)) - break - } - } - - // Setting a key to a zero reflect.Value deletes the key from the map. - om.m.SetMapIndex(rkey, reflect.Value{}) -} - -// Keys has a parametric type: -// -// func (om *OrdMap) Keys() []K -// -// Keys returns a list of keys in `om` in the order they were inserted. -// -// Behavior is undefined if the list is modified by the caller. -func (om *OrdMap) Keys() interface{} { - return om.keys.Interface() -} - -// Values has a parametric type: -// -// func (om *OrdMap) Values() []V -// -// Values returns a shallow copy of the values in `om` in the order that they -// were inserted. -func (om *OrdMap) Values() interface{} { - mlen := om.Len() - tvals := reflect.SliceOf(om.vtype) - rvals := reflect.MakeSlice(tvals, mlen, mlen) - for i := 0; i < mlen; i++ { - rvals.Index(i).Set(om.m.MapIndex(om.keys.Index(i))) - } - return rvals.Interface() -} - -// Len has a parametric type: -// -// func (om *OrdMap) Len() int -// -// Len returns the number of keys in the map `om`. 
-func (om *OrdMap) Len() int { - return om.m.Len() -} - -func (om *OrdMap) zeroValue() reflect.Value { - return reflect.New(om.vtype).Elem() -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/data/ordmap_test.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/data/ordmap_test.go deleted file mode 100644 index 3b1326bb0..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/data/ordmap_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package data - -import ( - "fmt" - "reflect" - "testing" -) - -var pf = fmt.Printf - -func TestOrdMap(t *testing.T) { - omap := OrderedMap(new(string), new(int)) - - omap.Put("kaitlyn", 24) - omap.Put("andrew", 25) - omap.Put("lauren", 20) - omap.Put("jen", 24) - omap.Put("brennan", 25) - - omap.Delete("kaitlyn") - - assertDeep(t, omap.Keys(), []string{"andrew", "lauren", "jen", "brennan"}) - assertDeep(t, omap.Values(), []int{25, 20, 24, 25}) -} - -func ExampleOrderedMap() { - omap := OrderedMap(new(string), new([]string)) - - omap.Put("Bruce Springsteen", - []string{"Thunder Road", "Born to Run", "This Hard Land"}) - omap.Put("J. Geils Band", - []string{"Musta Got Lost", "Freeze Frame", "Southside Shuffle"}) - omap.Put("Bob Seger", - []string{"Against the Wind", "Roll Me Away", "Night Moves"}) - - for _, key := range omap.Keys().([]string) { - fmt.Println(key) - } - - omap.Delete("J. Geils Band") - fmt.Println("\nDeleted 'J. Geils Band'...\n") - - for _, key := range omap.Keys().([]string) { - fmt.Printf("%s: %v\n", key, omap.Get(key)) - } - - // Output: - // Bruce Springsteen - // J. Geils Band - // Bob Seger - // - // Deleted 'J. Geils Band'... - // - // Bruce Springsteen: [Thunder Road Born to Run This Hard Land] - // Bob Seger: [Against the Wind Roll Me Away Night Moves] -} - -func assertDeep(t *testing.T, v1, v2 interface{}) { - if !reflect.DeepEqual(v1, v2) { - t.Fatalf("%v != %v", v1, v2) - } -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/doc.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/doc.go deleted file mode 100644 index 78405f7e8..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Package ty provides utilities for writing type parametric functions with run -time type safety. - -This package contains two sub-packages `fun` and `data` which define some -potentially useful functions and abstractions using the type checker in -this package. - -Requirements - -Go tip (or 1.1 when it's released) is required. This package will not work -with Go 1.0.x or earlier. - -The very foundation of this package only recently became possible with the -addition of 3 new functions in the standard library `reflect` package: -SliceOf, MapOf and ChanOf. In particular, it provides the ability to -dynamically construct types at run time from component types. - -Further extensions to this package can be made if similar functions are added -for structs and functions(?). -*/ -package ty diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/chan.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/chan.go deleted file mode 100644 index 45db9f492..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/chan.go +++ /dev/null @@ -1,84 +0,0 @@ -package fun - -import ( - "reflect" - - "github.com/BurntSushi/ty" -) - -// AsyncChan has a parametric type: -// -// func AsyncChan(chan A) (send chan<- A, recv <-chan A) -// -// AsyncChan provides a channel abstraction without a fixed size buffer. 
-// The input should be a pointer to a channel that has a type without a -// direction, e.g., `new(chan int)`. Two new channels are returned: `send` and -// `recv`. The caller must send data on the `send` channel and receive data on -// the `recv` channel. -// -// Implementation is inspired by Kyle Lemons' work: -// https://github.com/kylelemons/iq/blob/master/iq_slice.go -func AsyncChan(baseChan interface{}) (send, recv interface{}) { - chk := ty.Check( - new(func(*chan ty.A) (chan ty.A, chan ty.A)), - baseChan) - - // We don't care about the baseChan---it is only used to construct - // the return types. - tsend, trecv := chk.Returns[0], chk.Returns[1] - - buf := make([]reflect.Value, 0, 10) - rsend := reflect.MakeChan(tsend, 0) - rrecv := reflect.MakeChan(trecv, 0) - - go func() { - defer rrecv.Close() - - BUFLOOP: - for { - if len(buf) == 0 { - rv, ok := rsend.Recv() - if !ok { - break BUFLOOP - } - buf = append(buf, rv) - } - - cases := []reflect.SelectCase{ - // case v, ok := <-send - { - Dir: reflect.SelectRecv, - Chan: rsend, - }, - // case recv <- buf[0] - { - Dir: reflect.SelectSend, - Chan: rrecv, - Send: buf[0], - }, - } - choice, rval, rok := reflect.Select(cases) - switch choice { - case 0: - // case v, ok := <-send - if !rok { - break BUFLOOP - } - buf = append(buf, rval) - case 1: - // case recv <- buf[0] - buf = buf[1:] - default: - panic("bug") - } - } - for _, rv := range buf { - rrecv.Send(rv) - } - }() - - // Create the directional channel types. - tsDir := reflect.ChanOf(reflect.SendDir, tsend.Elem()) - trDir := reflect.ChanOf(reflect.RecvDir, trecv.Elem()) - return rsend.Convert(tsDir).Interface(), rrecv.Convert(trDir).Interface() -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/chan_test.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/chan_test.go deleted file mode 100644 index 8eae5f890..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/chan_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package fun - -import ( - "testing" -) - -func TestAsyncChan(t *testing.T) { - s, r := AsyncChan(new(chan int)) - send, recv := s.(chan<- int), r.(<-chan int) - - sending := randIntSlice(1000, 0) - for _, v := range sending { - send <- v - } - close(send) - - received := make([]int, 0) - for v := range recv { - received = append(received, v) - } - - assertDeep(t, sending, received) -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/doc.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/doc.go deleted file mode 100644 index 50e10a9ec..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/doc.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Package fun provides type parametric utility functions for lists, sets, -channels and maps. - -The central contribution of this package is a set of functions that operate -on values without depending on their types while maintaining type safety at -run time using the `reflect` package. - -There are two primary concerns when deciding whether to use this package -or not: the loss of compile time type safety and performance. In particular, -with regard to performance, most functions here are much slower than their -built-in counter parts. However, there are a couple where the overhead of -reflection is relatively insignificant: AsyncChan and ParMap. - -In terms of code structure and organization, the price is mostly paid inside -of the package due to the annoyances of operating with `reflect`. 
The caller -usually only has one obligation other than to provide values consistent with -the type of the function: type assert the result to the desired type. - -When the caller provides values that are inconsistent with the parametric type -of the function, the function will panic with a `TypeError`. (Either because -the types cannot be unified or because they cannot be constructed due to -limitations of the `reflect` package. See the `github.com/BurntSushi/ty` -package for more details.) - -Requirements - -Go tip (or 1.1 when it's released) is required. This package will not work -with Go 1.0.x or earlier. - -The very foundation of this package only recently became possible with the -addition of 3 new functions in the standard library `reflect` package: -SliceOf, MapOf and ChanOf. In particular, it provides the ability to -dynamically construct types at run time from component types. - -Further extensions to this package can be made if similar functions are added -for structs and functions(?). - -Examples - -Squaring each integer in a slice: - - square := func(x int) int { return x * x } - nums := []int{1, 2, 3, 4, 5} - squares := Map(square, nums).([]int) - -Reversing any slice: - - slice := []string{"a", "b", "c"} - reversed := Reverse(slice).([]string) - -Sorting any slice: - - // Sort a slice of structs with first class functions. - type Album struct { - Title string - Year int - } - albums := []Album{ - {"Born to Run", 1975}, - {"WIESS", 1973}, - {"Darkness", 1978}, - {"Greetings", 1973}, - } - - less := func(a, b Album) bool { return a.Year < b.Year }, - sorted := QuickSort(less, albums).([]Album) - -Parallel map: - - // Compute the prime factorization concurrently - // for every integer in [1000, 10000]. - primeFactors := func(n int) []int { // compute prime factors } - factors := ParMap(primeFactors, Range(1000, 10001)).([]int) - -Asynchronous channel without a fixed size buffer: - - s, r := AsyncChan(new(chan int)) - send, recv := s.(chan<- int), r.(<-chan int) - - // Send as much as you want. - for i := 0; i < 100; i++ { - s <- i - } - close(s) - for i := range recv { - // do something with `i` - } - -Shuffle any slice in place: - - jumbleMe := []string{"The", "quick", "brown", "fox"} - Shuffle(jumbleMe) - -Function memoization: - - // Memoizing a recursive function like `fibonacci`. - // Write it like normal: - var fib func(n int64) int64 - fib = func(n int64) int64 { - switch n { - case 0: - return 0 - case 1: - return 1 - } - return fib(n - 1) + fib(n - 2) - } - - // And wrap it with `Memo`. - fib = Memo(fib).(func(int64) int64) - - // Will keep your CPU busy for a long time - // without memoization. 
- fmt.Println(fib(80)) - -*/ -package fun diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/fun_test.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/fun_test.go deleted file mode 100644 index af0fb8cf7..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/fun_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package fun - -import ( - "flag" - "fmt" - "math/rand" - "reflect" - "testing" - "time" -) - -var ( - pf = fmt.Printf - rng *rand.Rand - flagBuiltin = false -) - -func init() { - rng = rand.New(rand.NewSource(time.Now().UnixNano())) - - flag.BoolVar(&flagBuiltin, "builtin", flagBuiltin, - "When set, benchmarks for non-type parametric functions are run.") -} - -func assertDeep(t *testing.T, v1, v2 interface{}) { - if !reflect.DeepEqual(v1, v2) { - t.Fatalf("%v != %v", v1, v2) - } -} - -func randIntSlice(size, max int) []int { - if max == 0 { - max = 1000000 - } - slice := make([]int, size) - for i := 0; i < size; i++ { - slice[i] = rng.Intn(max) - } - return slice -} - -func randInt64Slice(size, max int64) []int64 { - if max == 0 { - max = 1000000 - } - slice := make([]int64, size) - for i := int64(0); i < size; i++ { - slice[i] = rng.Int63n(max) - } - return slice -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/func.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/func.go deleted file mode 100644 index d3b387677..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/func.go +++ /dev/null @@ -1,35 +0,0 @@ -package fun - -import ( - "reflect" - - "github.com/BurntSushi/ty" -) - -// Memo has a parametric type: -// -// func Memo(f func(A) B) func(A) B -// -// Memo memoizes any function of a single argument that returns a single value. -// The type `A` must be a Go type for which the comparison operators `==` and -// `!=` are fully defined (this rules out functions, maps and slices). -func Memo(f interface{}) interface{} { - chk := ty.Check( - new(func(func(ty.A) ty.B)), - f) - vf := chk.Args[0] - - saved := make(map[interface{}]reflect.Value) - memo := func(in []reflect.Value) []reflect.Value { - val := in[0].Interface() - ret, ok := saved[val] - if ok { - return []reflect.Value{ret} - } - - ret = call1(vf, in[0]) - saved[val] = ret - return []reflect.Value{ret} - } - return reflect.MakeFunc(vf.Type(), memo).Interface() -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/func_test.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/func_test.go deleted file mode 100644 index 4d6a2c766..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/func_test.go +++ /dev/null @@ -1,111 +0,0 @@ -package fun - -import ( - "fmt" - "testing" -) - -// The benchmarks are the test. The memo version (w/ reflection or not) should -// always be faster than the non-memo version given big enough N. 
- -func BenchmarkFibonacciMemo(b *testing.B) { - if flagBuiltin { - benchmarkFibonacciMemoBuiltin(b) - } else { - benchmarkFibonacciMemoReflect(b) - } -} - -func benchmarkFibonacciMemoBuiltin(b *testing.B) { - for i := 0; i < b.N; i++ { - fibonacciMemoBuiltin(30) - } -} - -func benchmarkFibonacciMemoReflect(b *testing.B) { - for i := 0; i < b.N; i++ { - fibonacciMemoReflect(30) - } -} - -func BenchmarkFibonacciNoMemo(b *testing.B) { - for i := 0; i < b.N; i++ { - fibonacci(30) - } -} - -func fibonacciMemoBuiltin(n int) int { - memo := func(f func(int) int) func(int) int { - saved := make(map[int]int) - return func(n int) int { - ret, ok := saved[n] - if ok { - return ret - } - - ret = f(n) - saved[n] = ret - return ret - } - } - - var fib func(n int) int - fib = memo(func(n int) int { - switch n { - case 0: - return 0 - case 1: - return 1 - } - return fib(n-1) + fib(n-2) - }) - return fib(n) -} - -func fibonacciMemoReflect(n int) int { - var fib func(n int) int - fib = Memo(func(n int) int { - switch n { - case 0: - return 0 - case 1: - return 1 - } - return fib(n-1) + fib(n-2) - }).(func(int) int) - return fib(n) -} - -func fibonacci(n int) int { - switch n { - case 0: - return 0 - case 1: - return 1 - } - return fibonacci(n-1) + fibonacci(n-2) -} - -func ExampleMemo() { - // Memoizing a recursive function like `fibonacci`. - // Write it like normal: - var fib func(n int64) int64 - fib = func(n int64) int64 { - switch n { - case 0: - return 0 - case 1: - return 1 - } - return fib(n-1) + fib(n-2) - } - - // And wrap it with `Memo`. - fib = Memo(fib).(func(int64) int64) - - // Will keep your CPU busy for a long time - // without memoization. - fmt.Println(fib(80)) - // Output: - // 23416728348467685 -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list.go deleted file mode 100644 index 416811c3e..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list.go +++ /dev/null @@ -1,303 +0,0 @@ -package fun - -import ( - "reflect" - "runtime" - "sync" - - "github.com/BurntSushi/ty" -) - -// All has a parametric type: -// -// func All(p func(A) bool, xs []A) bool -// -// All returns `true` if and only if every element in `xs` satisfies `p`. -func All(f, xs interface{}) bool { - chk := ty.Check( - new(func(func(ty.A) bool, []ty.A) bool), - f, xs) - vf, vxs := chk.Args[0], chk.Args[1] - - xsLen := vxs.Len() - for i := 0; i < xsLen; i++ { - if !call1(vf, vxs.Index(i)).Interface().(bool) { - return false - } - } - return true -} - -// Exists has a parametric type: -// -// func Exists(p func(A) bool, xs []A) bool -// -// Exists returns `true` if and only if an element in `xs` satisfies `p`. -func Exists(f, xs interface{}) bool { - chk := ty.Check( - new(func(func(ty.A) bool, []ty.A) bool), - f, xs) - vf, vxs := chk.Args[0], chk.Args[1] - - xsLen := vxs.Len() - for i := 0; i < xsLen; i++ { - if call1(vf, vxs.Index(i)).Interface().(bool) { - return true - } - } - return false -} - -// In has a parametric type: -// -// func In(needle A, haystack []A) bool -// -// In returns `true` if and only if `v` can be found in `xs`. The equality test -// used is Go's standard `==` equality and NOT deep equality. -// -// Note that this requires that `A` be a type that can be meaningfully compared. 
-func In(needle, haystack interface{}) bool { - chk := ty.Check( - new(func(ty.A, []ty.A) bool), - needle, haystack) - vhaystack := chk.Args[1] - - length := vhaystack.Len() - for i := 0; i < length; i++ { - if vhaystack.Index(i).Interface() == needle { - return true - } - } - return false -} - -// Map has a parametric type: -// -// func Map(f func(A) B, xs []A) []B -// -// Map returns the list corresponding to the return value of applying -// `f` to each element in `xs`. -func Map(f, xs interface{}) interface{} { - chk := ty.Check( - new(func(func(ty.A) ty.B, []ty.A) []ty.B), - f, xs) - vf, vxs, tys := chk.Args[0], chk.Args[1], chk.Returns[0] - - xsLen := vxs.Len() - vys := reflect.MakeSlice(tys, xsLen, xsLen) - for i := 0; i < xsLen; i++ { - vy := call1(vf, vxs.Index(i)) - vys.Index(i).Set(vy) - } - return vys.Interface() -} - -// Filter has a parametric type: -// -// func Filter(p func(A) bool, xs []A) []A -// -// Filter returns a new list only containing the elements of `xs` that satisfy -// the predicate `p`. -func Filter(p, xs interface{}) interface{} { - chk := ty.Check( - new(func(func(ty.A) bool, []ty.A) []ty.A), - p, xs) - vp, vxs, tys := chk.Args[0], chk.Args[1], chk.Returns[0] - - xsLen := vxs.Len() - vys := reflect.MakeSlice(tys, 0, xsLen) - for i := 0; i < xsLen; i++ { - vx := vxs.Index(i) - if call1(vp, vx).Bool() { - vys = reflect.Append(vys, vx) - } - } - return vys.Interface() -} - -// Foldl has a parametric type: -// -// func Foldl(f func(A, B) B, init B, xs []A) B -// -// Foldl reduces a list of A to a single element B using a left fold with -// an initial value `init`. -func Foldl(f, init, xs interface{}) interface{} { - chk := ty.Check( - new(func(func(ty.A, ty.B) ty.B, ty.B, []ty.A) ty.B), - f, init, xs) - vf, vinit, vxs, tb := chk.Args[0], chk.Args[1], chk.Args[2], chk.Returns[0] - - xsLen := vxs.Len() - vb := zeroValue(tb) - vb.Set(vinit) - if xsLen == 0 { - return vb.Interface() - } - - vb.Set(call1(vf, vxs.Index(0), vb)) - for i := 1; i < xsLen; i++ { - vb.Set(call1(vf, vxs.Index(i), vb)) - } - return vb.Interface() -} - -// Foldr has a parametric type: -// -// func Foldr(f func(A, B) B, init B, xs []A) B -// -// Foldr reduces a list of A to a single element B using a right fold with -// an initial value `init`. -func Foldr(f, init, xs interface{}) interface{} { - chk := ty.Check( - new(func(func(ty.A, ty.B) ty.B, ty.B, []ty.A) ty.B), - f, init, xs) - vf, vinit, vxs, tb := chk.Args[0], chk.Args[1], chk.Args[2], chk.Returns[0] - - xsLen := vxs.Len() - vb := zeroValue(tb) - vb.Set(vinit) - if xsLen == 0 { - return vb.Interface() - } - - vb.Set(call1(vf, vxs.Index(xsLen-1), vb)) - for i := xsLen - 2; i >= 0; i-- { - vb.Set(call1(vf, vxs.Index(i), vb)) - } - return vb.Interface() -} - -// Concat has a parametric type: -// -// func Concat(xs [][]A) []A -// -// Concat returns a new flattened list by appending all elements of `xs`. -func Concat(xs interface{}) interface{} { - chk := ty.Check( - new(func([][]ty.A) []ty.A), - xs) - vxs, tflat := chk.Args[0], chk.Returns[0] - - xsLen := vxs.Len() - vflat := reflect.MakeSlice(tflat, 0, xsLen*3) - for i := 0; i < xsLen; i++ { - vflat = reflect.AppendSlice(vflat, vxs.Index(i)) - } - return vflat.Interface() -} - -// Reverse has a parametric type: -// -// func Reverse(xs []A) []A -// -// Reverse returns a new slice that is the reverse of `xs`. 
-func Reverse(xs interface{}) interface{} { - chk := ty.Check( - new(func([]ty.A) []ty.A), - xs) - vxs, tys := chk.Args[0], chk.Returns[0] - - xsLen := vxs.Len() - vys := reflect.MakeSlice(tys, xsLen, xsLen) - for i := 0; i < xsLen; i++ { - vys.Index(i).Set(vxs.Index(xsLen - 1 - i)) - } - return vys.Interface() -} - -// Copy has a parametric type: -// -// func Copy(xs []A) []A -// -// Copy returns a copy of `xs` using Go's `copy` operation. -func Copy(xs interface{}) interface{} { - chk := ty.Check( - new(func([]ty.A) []ty.A), - xs) - vxs, tys := chk.Args[0], chk.Returns[0] - - xsLen := vxs.Len() - vys := reflect.MakeSlice(tys, xsLen, xsLen) - reflect.Copy(vys, vxs) - return vys.Interface() -} - -// ParMap has a parametric type: -// -// func ParMap(f func(A) B, xs []A) []B -// -// ParMap is just like Map, except it applies `f` to each element in `xs` -// concurrently using N worker goroutines (where N is the number of CPUs -// available reported by the Go runtime). If you want to control the number -// of goroutines spawned, use `ParMapN`. -// -// It is important that `f` not be a trivial operation, otherwise the overhead -// of executing it concurrently will result in worse performance than using -// a `Map`. -func ParMap(f, xs interface{}) interface{} { - n := runtime.NumCPU() - if n < 1 { - n = 1 - } - return ParMapN(f, xs, n) -} - -// ParMapN has a parametric type: -// -// func ParMapN(f func(A) B, xs []A, n int) []B -// -// ParMapN is just like Map, except it applies `f` to each element in `xs` -// concurrently using `n` worker goroutines. -// -// It is important that `f` not be a trivial operation, otherwise the overhead -// of executing it concurrently will result in worse performance than using -// a `Map`. -func ParMapN(f, xs interface{}, n int) interface{} { - chk := ty.Check( - new(func(func(ty.A) ty.B, []ty.A) []ty.B), - f, xs) - vf, vxs, tys := chk.Args[0], chk.Args[1], chk.Returns[0] - - xsLen := vxs.Len() - ys := reflect.MakeSlice(tys, xsLen, xsLen) - - if n < 1 { - n = 1 - } - work := make(chan int, n) - wg := new(sync.WaitGroup) - for i := 0; i < n; i++ { - wg.Add(1) - go func() { - for j := range work { - // Good golly miss molly. Is `reflect.Value.Index` - // safe to access/set from multiple goroutines? - // XXX: If not, we'll need an extra wave of allocation to - // use real slices of `reflect.Value`. - ys.Index(j).Set(call1(vf, vxs.Index(j))) - } - wg.Done() - }() - } - for i := 0; i < xsLen; i++ { - work <- i - } - close(work) - wg.Wait() - return ys.Interface() -} - -// Range generates a list of integers corresponding to every integer in -// the half-open interval [x, y). -// -// Range will panic if `end < start`. 
-func Range(start, end int) []int { - if end < start { - panic("range must have end greater than or equal to start") - } - r := make([]int, end-start) - for i := start; i < end; i++ { - r[i-start] = i - } - return r -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list_parmap_test.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list_parmap_test.go deleted file mode 100644 index 503b53dcb..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list_parmap_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package fun - -import ( - "math" - "runtime" - "sync" - "testing" -) - -func TestParMap(t *testing.T) { - square := func(x int) int { return x * x } - squares := ParMap(square, []int{1, 2, 3, 4, 5}).([]int) - - assertDeep(t, squares, []int{1, 4, 9, 16, 25}) - assertDeep(t, []int{}, ParMap(square, []int{}).([]int)) -} - -func BenchmarkParMapSquare(b *testing.B) { - if flagBuiltin { - benchmarkParMapSquareBuiltin(b) - } else { - benchmarkParMapSquareReflect(b) - } -} - -func benchmarkParMapSquareReflect(b *testing.B) { - b.StopTimer() - square := func(a int64) int64 { return a * a } - list := randInt64Slice(1000, 1<<30) - b.StartTimer() - - for i := 0; i < b.N; i++ { - _ = ParMap(square, list).([]int64) - } -} - -func benchmarkParMapSquareBuiltin(b *testing.B) { - b.StopTimer() - square := func(a int64) int64 { return a * a } - list := randInt64Slice(1000, 1<<30) - b.StartTimer() - - for i := 0; i < b.N; i++ { - _ = parMapInt64(square, list) - } -} - -func BenchmarkParMapPrime(b *testing.B) { - if flagBuiltin { - benchmarkParMapPrimeBuiltin(b) - } else { - benchmarkParMapPrimeReflect(b) - } -} - -func benchmarkParMapPrimeReflect(b *testing.B) { - b.StopTimer() - list := randInt64Slice(1000, 1<<30) - b.StartTimer() - - for i := 0; i < b.N; i++ { - _ = ParMap(primeFactors, list).([][]int64) - } -} - -func benchmarkParMapPrimeBuiltin(b *testing.B) { - b.StopTimer() - list := randInt64Slice(1000, 1<<30) - b.StartTimer() - - for i := 0; i < b.N; i++ { - _ = parMapSliceInt64(primeFactors, list) - } -} - -func primeFactors(n int64) []int64 { - if isPrime(n) { - return []int64{n} - } - - bound := int64(math.Floor(math.Sqrt(float64(n)))) - for i := int64(2); i <= bound; i++ { - if n%i == 0 { - return append(primeFactors(i), primeFactors(n/i)...) 
- } - } - panic("unreachable") -} - -func isPrime(n int64) bool { - bound := int64(math.Floor(math.Sqrt(float64(n)))) - for i := int64(2); i <= bound; i++ { - if n%i == 0 { - return false - } - } - return true -} - -func parMapInt64(f func(n int64) int64, xs []int64) []int64 { - ys := make([]int64, len(xs), len(xs)) - N := runtime.NumCPU() - if N < 1 { - N = 1 - } - work := make(chan int, N) - wg := new(sync.WaitGroup) - for i := 0; i < N; i++ { - wg.Add(1) - go func() { - for j := range work { - ys[j] = f(xs[j]) - } - wg.Done() - }() - } - for i := 0; i < len(xs); i++ { - work <- i - } - close(work) - wg.Wait() - return ys -} - -func parMapSliceInt64(f func(n int64) []int64, xs []int64) [][]int64 { - ys := make([][]int64, len(xs), len(xs)) - N := runtime.NumCPU() - if N < 1 { - N = 1 - } - work := make(chan int, N) - wg := new(sync.WaitGroup) - for i := 0; i < N; i++ { - wg.Add(1) - go func() { - for j := range work { - ys[j] = f(xs[j]) - } - wg.Done() - }() - } - for i := 0; i < len(xs); i++ { - work <- i - } - close(work) - wg.Wait() - return ys -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list_test.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list_test.go deleted file mode 100644 index 1a124bf1e..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/list_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package fun - -import ( - "testing" -) - -func TestMap(t *testing.T) { - square := func(x int) int { return x * x } - squares := Map(square, []int{1, 2, 3, 4, 5}).([]int) - - assertDeep(t, squares, []int{1, 4, 9, 16, 25}) - assertDeep(t, []int{}, Map(square, []int{}).([]int)) - - strlen := func(s string) int { return len(s) } - lens := Map(strlen, []string{"abc", "ab", "a"}).([]int) - assertDeep(t, lens, []int{3, 2, 1}) -} - -func TestFilter(t *testing.T) { - even := func(x int) bool { return x%2 == 0 } - evens := Filter(even, []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}).([]int) - - assertDeep(t, evens, []int{2, 4, 6, 8, 10}) - assertDeep(t, []int{}, Filter(even, []int{}).([]int)) -} - -func TestFoldl(t *testing.T) { - // Use an operation that isn't associative so that we know we've got - // the left/right folds done correctly. - reducer := func(a, b int) int { return b % a } - v := Foldl(reducer, 7, []int{4, 5, 6}).(int) - - assertDeep(t, v, 3) - assertDeep(t, 0, Foldl(reducer, 0, []int{}).(int)) -} - -func TestFoldr(t *testing.T) { - // Use an operation that isn't associative so that we know we've got - // the left/right folds done correctly. 
- reducer := func(a, b int) int { return b % a } - v := Foldr(reducer, 7, []int{4, 5, 6}).(int) - - assertDeep(t, v, 1) - assertDeep(t, 0, Foldr(reducer, 0, []int{}).(int)) -} - -func TestConcat(t *testing.T) { - toflat := [][]int{ - {1, 2, 3}, - {4, 5, 6}, - {7, 8, 9}, - } - flat := Concat(toflat).([]int) - - assertDeep(t, flat, []int{1, 2, 3, 4, 5, 6, 7, 8, 9}) -} - -func TestReverse(t *testing.T) { - reversed := Reverse([]int{1, 2, 3, 4, 5}).([]int) - - assertDeep(t, reversed, []int{5, 4, 3, 2, 1}) -} - -func TestCopy(t *testing.T) { - orig := []int{1, 2, 3, 4, 5} - copied := Copy(orig).([]int) - - orig[1] = 999 - - assertDeep(t, copied, []int{1, 2, 3, 4, 5}) -} - -func TestPointers(t *testing.T) { - type temp struct { - val int - } - square := func(t *temp) *temp { return &temp{t.val * t.val} } - squares := Map(square, []*temp{ - {1}, {2}, {3}, {4}, {5}, - }) - - assertDeep(t, squares, []*temp{ - {1}, {4}, {9}, {16}, {25}, - }) -} - -func BenchmarkMapSquare(b *testing.B) { - if flagBuiltin { - benchmarkMapSquareBuiltin(b) - } else { - benchmarkMapSquareReflect(b) - } -} - -func benchmarkMapSquareReflect(b *testing.B) { - b.StopTimer() - square := func(a int64) int64 { return a * a } - list := randInt64Slice(1000, 1<<30) - b.StartTimer() - - for i := 0; i < b.N; i++ { - _ = Map(square, list).([]int64) - } -} - -func benchmarkMapSquareBuiltin(b *testing.B) { - b.StopTimer() - square := func(a int64) int64 { return a * a } - list := randInt64Slice(1000, 1<<30) - b.StartTimer() - - for i := 0; i < b.N; i++ { - ret := make([]int64, len(list)) - for i := 0; i < len(list); i++ { - ret[i] = square(list[i]) - } - } -} - -func BenchmarkMapPrime(b *testing.B) { - if flagBuiltin { - benchmarkMapPrimeBuiltin(b) - } else { - benchmarkMapPrimeReflect(b) - } -} - -func benchmarkMapPrimeReflect(b *testing.B) { - b.StopTimer() - list := randInt64Slice(1000, 1<<30) - b.StartTimer() - - for i := 0; i < b.N; i++ { - _ = Map(primeFactors, list).([][]int64) - } -} - -func benchmarkMapPrimeBuiltin(b *testing.B) { - b.StopTimer() - list := randInt64Slice(1000, 1<<30) - b.StartTimer() - - for i := 0; i < b.N; i++ { - ret := make([][]int64, len(list)) - for i := 0; i < len(list); i++ { - ret[i] = primeFactors(list[i]) - } - } -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/map.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/map.go deleted file mode 100644 index d2ff97f7f..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/map.go +++ /dev/null @@ -1,46 +0,0 @@ -package fun - -import ( - "reflect" - - "github.com/BurntSushi/ty" -) - -// Keys has a parametric type: -// -// func Keys(m map[A]B) []A -// -// Keys returns a list of the keys of `m` in an unspecified order. -func Keys(m interface{}) interface{} { - chk := ty.Check( - new(func(map[ty.A]ty.B) []ty.A), - m) - vm, tkeys := chk.Args[0], chk.Returns[0] - - vkeys := reflect.MakeSlice(tkeys, vm.Len(), vm.Len()) - for i, vkey := range vm.MapKeys() { - vkeys.Index(i).Set(vkey) - } - return vkeys.Interface() -} - -// Values has a parametric type: -// -// func Values(m map[A]B) []B -// -// Values returns a list of the values of `m` in an unspecified order. 
-func Values(m interface{}) interface{} { - chk := ty.Check( - new(func(map[ty.A]ty.B) []ty.B), - m) - vm, tvals := chk.Args[0], chk.Returns[0] - - vvals := reflect.MakeSlice(tvals, vm.Len(), vm.Len()) - for i, vkey := range vm.MapKeys() { - vvals.Index(i).Set(vm.MapIndex(vkey)) - } - return vvals.Interface() -} - -// func MapMerge(m1, m2 interface{}) interface{} { -// } diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/map_test.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/map_test.go deleted file mode 100644 index cc23b8859..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/map_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package fun - -import ( - "testing" -) - -func TestKeys(t *testing.T) { - m := map[string]int{ - "c": 0, "b": 0, "a": 0, - } - keys := Keys(m).([]string) - - scmp := func(a, b string) bool { return a < b } - assertDeep(t, []string{"a", "b", "c"}, QuickSort(scmp, keys)) -} - -func TestValues(t *testing.T) { - m := map[int]string{ - 1: "c", 2: "b", 3: "a", - } - vals := Values(m).([]string) - - scmp := func(a, b string) bool { return a < b } - assertDeep(t, []string{"a", "b", "c"}, QuickSort(scmp, vals)) -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/rand.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/rand.go deleted file mode 100644 index 2b0e34ae8..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/rand.go +++ /dev/null @@ -1,94 +0,0 @@ -package fun - -import ( - "math/rand" - "reflect" - "time" - - "github.com/BurntSushi/ty" -) - -var randNumGen *rand.Rand - -func init() { - randNumGen = rand.New(rand.NewSource(time.Now().UnixNano())) -} - -// ShuffleGen has a parametric type: -// -// func ShuffleGen(xs []A, rng *rand.Rand) -// -// ShuffleGen shuffles `xs` in place using the given random number -// generator `rng`. -func ShuffleGen(xs interface{}, rng *rand.Rand) { - chk := ty.Check( - new(func([]ty.A, *rand.Rand)), - xs, rng) - vxs := chk.Args[0] - - // Implements the Fisher-Yates shuffle: http://goo.gl/Hb9vg - xsLen := vxs.Len() - swapper := swapperOf(vxs.Type().Elem()) - for i := xsLen - 1; i >= 1; i-- { - j := rng.Intn(i + 1) - swapper.swap(vxs.Index(i), vxs.Index(j)) - } -} - -// Shuffle has a parametric type: -// -// func Shuffle(xs []A) -// -// Shuffle shuffles `xs` in place using a default random number -// generator seeded once at program initialization. -func Shuffle(xs interface{}) { - ShuffleGen(xs, randNumGen) -} - -// Sample has a parametric type: -// -// func Sample(population []A, n int) []A -// -// Sample returns a random sample of size `n` from a list -// `population` using a default random number generator seeded once at -// program initialization. -// All elements in `population` have an equal chance of being selected. -// If `n` is greater than the size of `population`, then `n` is set to -// the size of the population. -func Sample(population interface{}, n int) interface{} { - return SampleGen(population, n, randNumGen) -} - -// SampleGen has a parametric type: -// -// func SampleGen(population []A, n int, rng *rand.Rand) []A -// -// SampleGen returns a random sample of size `n` from a list -// `population` using a given random number generator `rng`. -// All elements in `population` have an equal chance of being selected. -// If `n` is greater than the size of `population`, then `n` is set to -// the size of the population. 
-func SampleGen(population interface{}, n int, rng *rand.Rand) interface{} { - chk := ty.Check( - new(func([]ty.A, int, *rand.Rand) []ty.A), - population, n, rng) - rpop, tsamp := chk.Args[0], chk.Returns[0] - - popLen := rpop.Len() - if n == 0 { - return reflect.MakeSlice(tsamp, 0, 0).Interface() - } - if n > popLen { - n = popLen - } - - // TODO(burntsushi): Implement an algorithm that doesn't depend on - // the size of the population. - - rsamp := reflect.MakeSlice(tsamp, n, n) - choices := rng.Perm(popLen) - for i := 0; i < n; i++ { - rsamp.Index(i).Set(rpop.Index(choices[i])) - } - return rsamp.Interface() -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/rand_test.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/rand_test.go deleted file mode 100644 index 821ed3501..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/rand_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package fun - -import ( - "math/rand" - "testing" -) - -func TestShuffle(t *testing.T) { - nums := Range(0, 100) - Shuffle(nums) - - assertDeep(t, Set(nums), Set(Range(0, 100))) -} - -func TestSample(t *testing.T) { - rng := rand.New(rand.NewSource(0)) - nums := Range(0, 100) - sample := SampleGen(nums, 3, rng).([]int) - - assertDeep(t, Set([]int{35, 40, 50}), Set(sample)) -} - -func BenchmarkShuffle(b *testing.B) { - if flagBuiltin { - benchmarkShuffleBuiltin(b) - } else { - benchmarkShuffleReflect(b) - } -} - -func benchmarkShuffleBuiltin(b *testing.B) { - b.StopTimer() - list := randIntSlice(10000, 0) - b.StartTimer() - - for i := 0; i < b.N; i++ { - shuffle(list) - } -} - -func benchmarkShuffleReflect(b *testing.B) { - b.StopTimer() - list := randIntSlice(10000, 0) - b.StartTimer() - - for i := 0; i < b.N; i++ { - Shuffle(list) - } -} - -func shuffle(xs []int) { - for i := len(xs) - 1; i >= 1; i-- { - j := randNumGen.Intn(i + 1) - xs[i], xs[j] = xs[j], xs[i] - } -} - -func BenchmarkSample(b *testing.B) { - if flagBuiltin { - benchmarkSampleBuiltin(b) - } else { - benchmarkSampleReflect(b) - } -} - -func benchmarkSampleBuiltin(b *testing.B) { - b.StopTimer() - list := randIntSlice(10000, 0) - b.StartTimer() - - for i := 0; i < b.N; i++ { - sample(list, 100) - } -} - -func benchmarkSampleReflect(b *testing.B) { - b.StopTimer() - list := randIntSlice(10000, 0) - b.StartTimer() - - for i := 0; i < b.N; i++ { - Sample(list, 100) - } -} - -func sample(pop []int, n int) []int { - if n == 0 { - return []int{} - } - if n > len(pop) { - n = len(pop) - } - - samp := make([]int, n, n) - choices := randNumGen.Perm(len(pop)) - for i := 0; i < n; i++ { - samp[i] = pop[choices[i]] - } - return samp -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/set.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/set.go deleted file mode 100644 index 4aa4f4d76..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/set.go +++ /dev/null @@ -1,99 +0,0 @@ -package fun - -import ( - "reflect" - - "github.com/BurntSushi/ty" -) - -// Set has a parametric type: -// -// func Set(xs []A) map[A]bool -// -// Set creates a set from a list. 
-func Set(xs interface{}) interface{} { - chk := ty.Check( - new(func([]ty.A) map[ty.A]bool), - xs) - vxs, tset := chk.Args[0], chk.Returns[0] - - vtrue := reflect.ValueOf(true) - vset := reflect.MakeMap(tset) - xsLen := vxs.Len() - for i := 0; i < xsLen; i++ { - vset.SetMapIndex(vxs.Index(i), vtrue) - } - return vset.Interface() -} - -// Union has a parametric type: -// -// func Union(a map[A]bool, b map[A]bool) map[A]bool -// -// Union returns the union of two sets, where a set is represented as a -// `map[A]bool`. The sets `a` and `b` are not modified. -func Union(a, b interface{}) interface{} { - chk := ty.Check( - new(func(map[ty.A]bool, map[ty.A]bool) map[ty.A]bool), - a, b) - va, vb, tc := chk.Args[0], chk.Args[1], chk.Returns[0] - - vtrue := reflect.ValueOf(true) - vc := reflect.MakeMap(tc) - for _, vkey := range va.MapKeys() { - vc.SetMapIndex(vkey, vtrue) - } - for _, vkey := range vb.MapKeys() { - vc.SetMapIndex(vkey, vtrue) - } - return vc.Interface() -} - -// Intersection has a parametric type: -// -// func Intersection(a map[A]bool, b map[A]bool) map[A]bool -// -// Intersection returns the intersection of two sets, where a set is -// represented as a `map[A]bool`. The sets `a` and `b` are not modified. -func Intersection(a, b interface{}) interface{} { - chk := ty.Check( - new(func(map[ty.A]bool, map[ty.A]bool) map[ty.A]bool), - a, b) - va, vb, tc := chk.Args[0], chk.Args[1], chk.Returns[0] - - vtrue := reflect.ValueOf(true) - vc := reflect.MakeMap(tc) - for _, vkey := range va.MapKeys() { - if vb.MapIndex(vkey).IsValid() { - vc.SetMapIndex(vkey, vtrue) - } - } - for _, vkey := range vb.MapKeys() { - if va.MapIndex(vkey).IsValid() { - vc.SetMapIndex(vkey, vtrue) - } - } - return vc.Interface() -} - -// Difference has a parametric type: -// -// func Difference(a map[A]bool, b map[A]bool) map[A]bool -// -// Difference returns a set with all elements in `a` that are not in `b`. -// The sets `a` and `b` are not modified. 
-func Difference(a, b interface{}) interface{} { - chk := ty.Check( - new(func(map[ty.A]bool, map[ty.A]bool) map[ty.A]bool), - a, b) - va, vb, tc := chk.Args[0], chk.Args[1], chk.Returns[0] - - vtrue := reflect.ValueOf(true) - vc := reflect.MakeMap(tc) - for _, vkey := range va.MapKeys() { - if !vb.MapIndex(vkey).IsValid() { - vc.SetMapIndex(vkey, vtrue) - } - } - return vc.Interface() -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/set_test.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/set_test.go deleted file mode 100644 index 9e4f4a596..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/set_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package fun - -import ( - "testing" -) - -func TestSet(t *testing.T) { - a := []string{"andrew", "plato", "andrew", "cauchy", "cauchy", "andrew"} - set := Set(a).(map[string]bool) - - assertDeep(t, set, map[string]bool{ - "andrew": true, - "plato": true, - "cauchy": true, - }) -} - -func TestUnion(t *testing.T) { - a := map[string]bool{ - "springsteen": true, - "jgeils": true, - "seger": true, - "metallica": true, - } - b := map[string]bool{ - "metallica": true, - "chesney": true, - "mcgraw": true, - "cash": true, - } - c := Union(a, b).(map[string]bool) - - assertDeep(t, c, map[string]bool{ - "springsteen": true, - "jgeils": true, - "seger": true, - "metallica": true, - "chesney": true, - "mcgraw": true, - "cash": true, - }) -} - -func TestIntersection(t *testing.T) { - a := map[string]bool{ - "springsteen": true, - "jgeils": true, - "seger": true, - "metallica": true, - } - b := map[string]bool{ - "metallica": true, - "chesney": true, - "mcgraw": true, - "cash": true, - } - c := Intersection(a, b).(map[string]bool) - - assertDeep(t, c, map[string]bool{ - "metallica": true, - }) -} - -func TestDifference(t *testing.T) { - a := map[string]bool{ - "springsteen": true, - "jgeils": true, - "seger": true, - "metallica": true, - } - b := map[string]bool{ - "metallica": true, - "chesney": true, - "mcgraw": true, - "cash": true, - } - c := Difference(a, b).(map[string]bool) - - assertDeep(t, c, map[string]bool{ - "springsteen": true, - "jgeils": true, - "seger": true, - }) -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/sort.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/sort.go deleted file mode 100644 index b109147aa..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/sort.go +++ /dev/null @@ -1,98 +0,0 @@ -package fun - -import ( - "reflect" - "sort" - - "github.com/BurntSushi/ty" -) - -// QuickSort has a parametric type: -// -// func QuickSort(less func(x1 A, x2 A) bool, []A) []A -// -// QuickSort applies the "quicksort" algorithm to return a new sorted list -// of `xs`, where `xs` is not modified. -// -// `less` should be a function that returns true if and only if `x1` is less -// than `x2`. 
-func QuickSort(less, xs interface{}) interface{} { - chk := ty.Check( - new(func(func(ty.A, ty.A) bool, []ty.A) []ty.A), - less, xs) - vless, vxs, tys := chk.Args[0], chk.Args[1], chk.Returns[0] - - var qsort func(left, right int) - var partition func(left, right, pivot int) int - xsind := Range(0, vxs.Len()) - - qsort = func(left, right int) { - if left >= right { - return - } - pivot := (left + right) / 2 - pivot = partition(left, right, pivot) - - qsort(left, pivot-1) - qsort(pivot+1, right) - } - partition = func(left, right, pivot int) int { - vpivot := xsind[pivot] - xsind[pivot], xsind[right] = xsind[right], xsind[pivot] - - ind := left - for i := left; i < right; i++ { - if call1(vless, vxs.Index(xsind[i]), vxs.Index(vpivot)).Bool() { - xsind[i], xsind[ind] = xsind[ind], xsind[i] - ind++ - } - } - xsind[ind], xsind[right] = xsind[right], xsind[ind] - return ind - } - - // Sort `xsind` in place. - qsort(0, len(xsind)-1) - - vys := reflect.MakeSlice(tys, len(xsind), len(xsind)) - for i, xsIndex := range xsind { - vys.Index(i).Set(vxs.Index(xsIndex)) - } - return vys.Interface() -} - -// Sort has a parametric type: -// -// func Sort(less func(x1 A, x2 A) bool, []A) -// -// Sort uses the standard library `sort` package to sort `xs` in place. -// -// `less` should be a function that returns true if and only if `x1` is less -// than `x2`. -func Sort(less, xs interface{}) { - chk := ty.Check( - new(func(func(ty.A, ty.A) bool, []ty.A)), - less, xs) - - vless, vxs := chk.Args[0], chk.Args[1] - sort.Sort(&sortable{vless, vxs, swapperOf(vxs.Type().Elem())}) -} - -type sortable struct { - less reflect.Value - xs reflect.Value - swapper swapper -} - -func (s *sortable) Less(i, j int) bool { - ith, jth := s.xs.Index(i), s.xs.Index(j) - return call1(s.less, ith, jth).Bool() -} - -func (s *sortable) Swap(i, j int) { - s.swapper.swap(s.xs.Index(i), s.xs.Index(j)) -} - -func (s *sortable) Len() int { - return s.xs.Len() -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/sort_test.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/sort_test.go deleted file mode 100644 index b4548b2d5..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/sort_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package fun - -import ( - "sort" - "testing" -) - -func TestSort(t *testing.T) { - tosort := []int{10, 3, 5, 1, 15, 6} - Sort(func(a, b int) bool { return b < a }, tosort) - - assertDeep(t, tosort, []int{15, 10, 6, 5, 3, 1}) -} - -func TestQuickSort(t *testing.T) { - tosort := []int{10, 3, 5, 1, 15, 6} - sorted := QuickSort( - func(a, b int) bool { - return b < a - }, tosort).([]int) - - assertDeep(t, sorted, []int{15, 10, 6, 5, 3, 1}) -} - -func BenchmarkSort(b *testing.B) { - if flagBuiltin { - benchmarkSortBuiltin(b) - } else { - benchmarkSortReflect(b) - } -} - -func benchmarkSortReflect(b *testing.B) { - less := func(a, b int) bool { return a < b } - - for i := 0; i < b.N; i++ { - b.StopTimer() - list := randIntSlice(1000, 0) - b.StartTimer() - - Sort(less, list) - } -} - -func benchmarkSortBuiltin(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - list := randIntSlice(1000, 0) - b.StartTimer() - - sort.Sort(sort.IntSlice(list)) - } -} - -func BenchmarkQuickSort(b *testing.B) { - if flagBuiltin { - benchmarkQuickSortBuiltin(b) - } else { - benchmarkQuickSortReflect(b) - } -} - -func benchmarkQuickSortReflect(b *testing.B) { - less := func(a, b int) bool { return a < b } - - for i := 0; i < b.N; i++ { - b.StopTimer() - list := randIntSlice(1000, 0) - b.StartTimer() - - _ 
= QuickSort(less, list) - } -} - -func benchmarkQuickSortBuiltin(b *testing.B) { - less := func(a, b int) bool { return a < b } - - quicksort := func(xs []int) []int { - ys := make([]int, len(xs)) - copy(ys, xs) - - var qsort func(left, right int) - var partition func(left, right, pivot int) int - - qsort = func(left, right int) { - if left >= right { - return - } - pivot := (left + right) / 2 - pivot = partition(left, right, pivot) - - qsort(left, pivot-1) - qsort(pivot+1, right) - } - partition = func(left, right, pivot int) int { - vpivot := ys[pivot] - ys[pivot], ys[right] = ys[right], ys[pivot] - - ind := left - for i := left; i < right; i++ { - if less(ys[i], vpivot) { - ys[i], ys[ind] = ys[ind], ys[i] - ind++ - } - } - ys[ind], ys[right] = ys[right], ys[ind] - return ind - } - - return ys - } - - for i := 0; i < b.N; i++ { - b.StopTimer() - list := randIntSlice(1000, 0) - b.StartTimer() - - _ = quicksort(list) - } -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/util.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/util.go deleted file mode 100644 index a50895886..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/fun/util.go +++ /dev/null @@ -1,37 +0,0 @@ -package fun - -import ( - "reflect" -) - -func zeroValue(typ reflect.Type) reflect.Value { - return reflect.New(typ).Elem() -} - -type swapper reflect.Value - -func swapperOf(typ reflect.Type) swapper { - return swapper(zeroValue(typ)) -} - -func (s swapper) swap(a, b reflect.Value) { - vs := reflect.Value(s) - vs.Set(a) - a.Set(b) - b.Set(vs) -} - -func call(f reflect.Value, args ...reflect.Value) { - f.Call(args) -} - -func call1(f reflect.Value, args ...reflect.Value) reflect.Value { - return f.Call(args)[0] -} - -func call2(f reflect.Value, args ...reflect.Value) ( - reflect.Value, reflect.Value) { - - ret := f.Call(args) - return ret[0], ret[1] -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/perf/builtin-vs-reflect.bench b/Godeps/_workspace/src/github.com/BurntSushi/ty/perf/builtin-vs-reflect.bench deleted file mode 100644 index 312bae0b5..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/perf/builtin-vs-reflect.bench +++ /dev/null @@ -1,35 +0,0 @@ -benchmark old ns/op new ns/op delta -BenchmarkFibonacciMemo-12 5895 43896 +644.63% -BenchmarkFibonacciNoMemo-12 6827001 6829859 +0.04% -BenchmarkParMapSquare-12 408320 572307 +40.16% -BenchmarkParMapPrime-12 5289594 5510075 +4.17% -BenchmarkMapSquare-12 8499 457844 +5287.03% -BenchmarkMapPrime-12 34265372 32220176 -5.97% -BenchmarkShuffle-12 240036 1018408 +324.27% -BenchmarkSample-12 262565 271122 +3.26% -BenchmarkSort-12 137293 7716737 +5520.63% -BenchmarkQuickSort-12 6325 6051563 +95576.89% - -benchmark old allocs new allocs delta -BenchmarkFibonacciMemo-12 11 198 1700.00% -BenchmarkFibonacciNoMemo-12 0 0 n/a% -BenchmarkParMapSquare-12 21 2038 9604.76% -BenchmarkParMapPrime-12 7148 9160 28.15% -BenchmarkMapSquare-12 1 2014 201300.00% -BenchmarkMapPrime-12 7105 9173 29.11% -BenchmarkShuffle-12 0 7 n/a% -BenchmarkSample-12 2 11 450.00% -BenchmarkSort-12 1 27032 2703100.00% -BenchmarkQuickSort-12 1 22008 2200700.00% - -benchmark old bytes new bytes delta -BenchmarkFibonacciMemo-12 2449 12083 393.39% -BenchmarkFibonacciNoMemo-12 0 0 n/a% -BenchmarkParMapSquare-12 9127 60330 561.01% -BenchmarkParMapPrime-12 156540 222731 42.28% -BenchmarkMapSquare-12 8195 58053 608.40% -BenchmarkMapPrime-12 155952 221255 41.87% -BenchmarkShuffle-12 0 500 n/a% -BenchmarkSample-12 82785 83383 0.72% -BenchmarkSort-12 35 883869 
2525240.00% -BenchmarkQuickSort-12 8195 735645 8876.75% diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/session.vim b/Godeps/_workspace/src/github.com/BurntSushi/ty/session.vim deleted file mode 100644 index 562164be0..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/type-check.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/type-check.go deleted file mode 100644 index 9741daefd..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/type-check.go +++ /dev/null @@ -1,338 +0,0 @@ -package ty - -import ( - "fmt" - "reflect" - "strings" -) - -// TypeError corresponds to any error reported by the `Check` function. -// Since `Check` panics, if you want to run `Check` safely, it is -// appropriate to recover and use a type switch to discover a `TypeError` -// value. -type TypeError string - -func (te TypeError) Error() string { - return string(te) -} - -func pe(format string, v ...interface{}) TypeError { - return TypeError(fmt.Sprintf(format, v...)) -} - -func ppe(format string, v ...interface{}) { - panic(pe(format, v...)) -} - -// Typed corresponds to the information returned by `Check`. -type Typed struct { - // In correspondence with the `as` parameter to `Check`. - Args []reflect.Value - - // In correspondence with the return types of `f` in `Check`. - Returns []reflect.Type - - // The type environment generated via unification in `Check`. - // (Its usefulness in the public API is questionable.) - TypeEnv map[string]reflect.Type -} - -// Check accepts a function `f`, which may have a parametric type, along with a -// number of arguments in correspondence with the arguments to `f`, -// and returns inferred Go type information. This type information includes -// a list of `reflect.Value` in correspondence with `as`, a list of -// `reflect.Type` in correspondence with the return types of `f` and a type -// environment mapping type variables to `reflect.Type`. -// -// The power of `Check` comes from the following invariant: if `Check` returns, -// then the types of the arguments corresponding to `as` are consistent -// with the parametric type of `f`, *and* the parametric return types of `f` -// were made into valid Go types that are not parametric. Otherwise, there is -// a bug in `Check`. -// -// More concretely, consider a simple parametric function `Map`, which -// transforms a list of elements by applying a function to each element in -// order to generate a new list. Such a function constructed only for integers -// might have a type like -// -// func Map(func(int) int, []int) []int -// -// But the parametric type of `Map` could be given with -// -// func Map(func(A) B, []A) []B -// -// which in English reads, "Given a function from any type `A` to any type `B` -// and a slice of `A`, `Map` returns a slice of `B`." -// -// To write a parametric function like `Map`, one can pass a pointer -// to a nil function of the desired parametric type to get the reflection -// information: -// -// func Map(f, xs interface{}) interface{} { -// // Given the parametric type and the arguments, Check will -// // return all the reflection information you need to write `Map`. -// uni := ty.Check( -// new(func(func(ty.A) ty.B, []ty.A) []ty.B), -// f, xs) -// -// // `vf` and `vxs` are `reflect.Value`s of `f` and `xs`. 
-// vf, vxs := uni.Args[0], uni.Args[1] -// -// // `tys` is a `reflect.Type` of `[]ty.B` where `ty.B` is replaced -// // with the return type of the given function `f`. -// tys := uni.Returns[0] -// -// // Given the promise of `Check`, we now know that `vf` has -// // type `func(ty.A) ty.B` and `vxs` has type `[]ty.A`. -// xsLen := vxs.Len() -// -// // Constructs a new slice which will have type `[]ty.B`. -// vys := reflect.MakeSlice(tys, xsLen, xsLen) -// -// // Actually perform the `Map` operation, but in the world of -// // reflection. -// for i := 0; i < xsLen; i++ { -// vy := vf.Call([]reflect.Value{vxs.Index(i)})[0] -// vys.Index(i).Set(vy) -// } -// -// // The `reflect.Value.Interface` method is how we exit the world of -// // reflection. The onus is now on the caller to type assert it to -// // the appropriate type. -// return vys.Interface() -// } -// -// Working in the reflection world is certainly more inconvenient than writing -// regular Go code, but the information and invariants held by `Check` provide -// a more convenient experience than how one normally works with reflection. -// (Notice that there is no error-prone type switching or boiler plate to -// construct new types, since `Check` guarantees the types are consistent -// with the inputs for us.) -// -// And while writing such functions is still not so convenient, -// invoking them is simple: -// -// square := func(x int) int { return x * x } -// squared := Map(square, []int{1, 2, 3, 4, 5}).([]int) -// -// Restrictions -// -// There are a few restrictions imposed on the parametric return types of -// `f`: type variables may only be found in types that can be composed by the -// `reflect` package. This *only* includes channels, maps, pointers and slices. -// If a type variable is found in an array, function or struct, `Check` will -// panic. -// -// Also, type variables inside of structs are ignored in the types of the -// arguments `as`. This restriction may be lifted in the future. -// -// To be clear: type variables *may* appear in arrays or functions in the types -// of the arguments `as`. -func Check(f interface{}, as ...interface{}) *Typed { - rf := reflect.ValueOf(f) - tf := rf.Type() - - if tf.Kind() == reflect.Ptr { - rf = reflect.Indirect(rf) - tf = rf.Type() - } - if tf.Kind() != reflect.Func { - ppe("The type of `f` must be a function, but it is a '%s'.", tf.Kind()) - } - if tf.NumIn() != len(as) { - ppe("`f` expects %d arguments, but only %d were given.", - tf.NumIn(), len(as)) - } - - // Populate the argument value list. - args := make([]reflect.Value, len(as)) - for i := 0; i < len(as); i++ { - args[i] = reflect.ValueOf(as[i]) - } - - // Populate our type variable environment through unification. - tyenv := make(tyenv) - for i := 0; i < len(args); i++ { - tp := typePair{tyenv, tf.In(i), args[i].Type()} - - // Mutates the type variable environment. - if err := tp.unify(tp.param, tp.input); err != nil { - argTypes := make([]string, len(args)) - for i := range args { - argTypes[i] = args[i].Type().String() - } - ppe("\nError type checking\n\t%s\nwith argument types\n\t(%s)\n%s", - tf, strings.Join(argTypes, ", "), err) - } - } - - // Now substitute those types into the return types of `f`. - retTypes := make([]reflect.Type, tf.NumOut()) - for i := 0; i < tf.NumOut(); i++ { - retTypes[i] = (&returnType{tyenv, tf.Out(i)}).tysubst(tf.Out(i)) - } - return &Typed{args, retTypes, map[string]reflect.Type(tyenv)} -} - -// tyenv maps type variable names to their inferred Go type. 
-type tyenv map[string]reflect.Type - -// typePair represents a pair of types to be unified. They act as a way to -// report sensible error messages from within the unification algorithm. -// -// It also includes a type environment, which is mutated during unification. -type typePair struct { - tyenv tyenv - param reflect.Type - input reflect.Type -} - -func (tp typePair) error(format string, v ...interface{}) error { - return pe("Type error when unifying type '%s' and '%s': %s", - tp.param, tp.input, fmt.Sprintf(format, v...)) -} - -// unify attempts to satisfy a pair of types, where the `param` type is the -// expected type of a function argument and the `input` type is the known -// type of a function argument. The `param` type may be parametric (that is, -// it may contain a type that is convertible to TypeVariable) but the -// `input` type may *not* be parametric. -// -// Any failure to unify the two types results in a panic. -// -// The end result of unification is a type environment: a set of substitutions -// from type variable to a Go type. -func (tp typePair) unify(param, input reflect.Type) error { - if tyname := tyvarName(input); len(tyname) > 0 { - return tp.error("Type variables are not allowed in the types of " + - "arguments.") - } - if tyname := tyvarName(param); len(tyname) > 0 { - if cur, ok := tp.tyenv[tyname]; ok && cur != input { - return tp.error("Type variable %s expected type '%s' but got '%s'.", - tyname, cur, input) - } else if !ok { - tp.tyenv[tyname] = input - } - return nil - } - if param.Kind() != input.Kind() { - return tp.error("Cannot unify different kinds of types '%s' and '%s'.", - param, input) - } - - switch param.Kind() { - case reflect.Array: - return tp.unify(param.Elem(), input.Elem()) - case reflect.Chan: - if param.ChanDir() != input.ChanDir() { - return tp.error("Cannot unify '%s' with '%s' "+ - "(channel directions are different: '%s' != '%s').", - param, input, param.ChanDir(), input.ChanDir()) - } - return tp.unify(param.Elem(), input.Elem()) - case reflect.Func: - if param.NumIn() != input.NumIn() || param.NumOut() != input.NumOut() { - return tp.error("Cannot unify '%s' with '%s'.", param, input) - } - for i := 0; i < param.NumIn(); i++ { - if err := tp.unify(param.In(i), input.In(i)); err != nil { - return err - } - } - for i := 0; i < param.NumOut(); i++ { - if err := tp.unify(param.Out(i), input.Out(i)); err != nil { - return err - } - } - case reflect.Map: - if err := tp.unify(param.Key(), input.Key()); err != nil { - return err - } - return tp.unify(param.Elem(), input.Elem()) - case reflect.Ptr: - return tp.unify(param.Elem(), input.Elem()) - case reflect.Slice: - return tp.unify(param.Elem(), input.Elem()) - } - - // The only other container types are Interface and Struct. - // I am unsure about what to do with interfaces. Mind is fuzzy. - // Structs? I don't think it really makes much sense to use type - // variables inside of them. - return nil -} - -// returnType corresponds to the type of a single return value of a function, -// in which the type may be parametric. It also contains a type environment -// constructed from unification. -type returnType struct { - tyenv tyenv - typ reflect.Type -} - -func (rt returnType) panic(format string, v ...interface{}) { - ppe("Error substituting in return type '%s': %s", - rt.typ, fmt.Sprintf(format, v...)) -} - -// tysubst attempts to substitute all type variables within a single return -// type with their corresponding Go type from the type environment. 
-// -// tysubst will panic if a type variable is unbound, or if it encounters a -// type that cannot be dynamically created. Such types include arrays, -// functions and structs. (A limitation of the `reflect` package.) -func (rt returnType) tysubst(typ reflect.Type) reflect.Type { - if tyname := tyvarName(typ); len(tyname) > 0 { - if thetype, ok := rt.tyenv[tyname]; !ok { - rt.panic("Unbound type variable %s.", tyname) - } else { - return thetype - } - } - - switch typ.Kind() { - case reflect.Array: - rt.panic("Cannot dynamically create Array types.") - case reflect.Chan: - return reflect.ChanOf(typ.ChanDir(), rt.tysubst(typ.Elem())) - case reflect.Func: - rt.panic("Cannot dynamically create Function types.") - case reflect.Interface: - // rt.panic("TODO") - // Not sure if this is right. - return typ - case reflect.Map: - return reflect.MapOf(rt.tysubst(typ.Key()), rt.tysubst(typ.Elem())) - case reflect.Ptr: - return reflect.PtrTo(rt.tysubst(typ.Elem())) - case reflect.Slice: - return reflect.SliceOf(rt.tysubst(typ.Elem())) - case reflect.Struct: - rt.panic("Cannot dynamically create Struct types.") - case reflect.UnsafePointer: - rt.panic("Cannot dynamically create unsafe.Pointer types.") - } - - // We've covered all the composite types, so we're only left with - // base types. - return typ -} - -func tyvarName(t reflect.Type) string { - if !t.ConvertibleTo(tyvarUnderlyingType) { - return "" - } - return t.Name() -} - -// AssertType panics with a `TypeError` if `v` does not have type `t`. -// Otherwise, it returns the `reflect.Value` of `v`. -func AssertType(v interface{}, t reflect.Type) reflect.Value { - rv := reflect.ValueOf(v) - tv := rv.Type() - if tv != t { - ppe("Value '%v' has type '%s' but expected '%s'.", v, tv, t) - } - return rv -} diff --git a/Godeps/_workspace/src/github.com/BurntSushi/ty/tyvars.go b/Godeps/_workspace/src/github.com/BurntSushi/ty/tyvars.go deleted file mode 100644 index 73b19e8cc..000000000 --- a/Godeps/_workspace/src/github.com/BurntSushi/ty/tyvars.go +++ /dev/null @@ -1,28 +0,0 @@ -package ty - -import ( - "reflect" -) - -// TypeVariable is the underlying type of every type variable used in -// parametric types. It should not be used directly. Instead, use -// -// type myOwnTypeVariable TypeVariable -// -// to create your own type variable. For your convenience, this package -// defines some type variables for you. (e.g., `A`, `B`, `C`, ...) -type TypeVariable struct { - noImitation struct{} -} - -// tyvarUnderlyingType is used to discover types that are type variables. -// Namely, any type variable must be convertible to `TypeVariable`. -var tyvarUnderlyingType = reflect.TypeOf(TypeVariable{}) - -type A TypeVariable -type B TypeVariable -type C TypeVariable -type D TypeVariable -type E TypeVariable -type F TypeVariable -type G TypeVariable diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/README.md b/Godeps/_workspace/src/github.com/alecthomas/template/README.md deleted file mode 100644 index ef6a8ee30..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/README.md +++ /dev/null @@ -1,25 +0,0 @@ -# Go's `text/template` package with newline elision - -This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline. - -eg. 
- -``` -{{if true}}\ -hello -{{end}}\ -``` - -Will result in: - -``` -hello\n -``` - -Rather than: - -``` -\n -hello\n -\n -``` diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/doc.go b/Godeps/_workspace/src/github.com/alecthomas/template/doc.go deleted file mode 100644 index 223c595c2..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/doc.go +++ /dev/null @@ -1,406 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package template implements data-driven templates for generating textual output. - -To generate HTML output, see package html/template, which has the same interface -as this package but automatically secures HTML output against certain attacks. - -Templates are executed by applying them to a data structure. Annotations in the -template refer to elements of the data structure (typically a field of a struct -or a key in a map) to control execution and derive values to be displayed. -Execution of the template walks the structure and sets the cursor, represented -by a period '.' and called "dot", to the value at the current location in the -structure as execution proceeds. - -The input text for a template is UTF-8-encoded text in any format. -"Actions"--data evaluations or control structures--are delimited by -"{{" and "}}"; all text outside actions is copied to the output unchanged. -Actions may not span newlines, although comments can. - -Once parsed, a template may be executed safely in parallel. - -Here is a trivial example that prints "17 items are made of wool". - - type Inventory struct { - Material string - Count uint - } - sweaters := Inventory{"wool", 17} - tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}") - if err != nil { panic(err) } - err = tmpl.Execute(os.Stdout, sweaters) - if err != nil { panic(err) } - -More intricate examples appear below. - -Actions - -Here is the list of actions. "Arguments" and "pipelines" are evaluations of -data, defined in detail below. - -*/ -// {{/* a comment */}} -// A comment; discarded. May contain newlines. -// Comments do not nest and must start and end at the -// delimiters, as shown here. -/* - - {{pipeline}} - The default textual representation of the value of the pipeline - is copied to the output. - - {{if pipeline}} T1 {{end}} - If the value of the pipeline is empty, no output is generated; - otherwise, T1 is executed. The empty values are false, 0, any - nil pointer or interface value, and any array, slice, map, or - string of length zero. - Dot is unaffected. - - {{if pipeline}} T1 {{else}} T0 {{end}} - If the value of the pipeline is empty, T0 is executed; - otherwise, T1 is executed. Dot is unaffected. - - {{if pipeline}} T1 {{else if pipeline}} T0 {{end}} - To simplify the appearance of if-else chains, the else action - of an if may include another if directly; the effect is exactly - the same as writing - {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}} - - {{range pipeline}} T1 {{end}} - The value of the pipeline must be an array, slice, map, or channel. - If the value of the pipeline has length zero, nothing is output; - otherwise, dot is set to the successive elements of the array, - slice, or map and T1 is executed. If the value is a map and the - keys are of basic type with a defined order ("comparable"), the - elements will be visited in sorted key order. 
- - {{range pipeline}} T1 {{else}} T0 {{end}} - The value of the pipeline must be an array, slice, map, or channel. - If the value of the pipeline has length zero, dot is unaffected and - T0 is executed; otherwise, dot is set to the successive elements - of the array, slice, or map and T1 is executed. - - {{template "name"}} - The template with the specified name is executed with nil data. - - {{template "name" pipeline}} - The template with the specified name is executed with dot set - to the value of the pipeline. - - {{with pipeline}} T1 {{end}} - If the value of the pipeline is empty, no output is generated; - otherwise, dot is set to the value of the pipeline and T1 is - executed. - - {{with pipeline}} T1 {{else}} T0 {{end}} - If the value of the pipeline is empty, dot is unaffected and T0 - is executed; otherwise, dot is set to the value of the pipeline - and T1 is executed. - -Arguments - -An argument is a simple value, denoted by one of the following. - - - A boolean, string, character, integer, floating-point, imaginary - or complex constant in Go syntax. These behave like Go's untyped - constants, although raw strings may not span newlines. - - The keyword nil, representing an untyped Go nil. - - The character '.' (period): - . - The result is the value of dot. - - A variable name, which is a (possibly empty) alphanumeric string - preceded by a dollar sign, such as - $piOver2 - or - $ - The result is the value of the variable. - Variables are described below. - - The name of a field of the data, which must be a struct, preceded - by a period, such as - .Field - The result is the value of the field. Field invocations may be - chained: - .Field1.Field2 - Fields can also be evaluated on variables, including chaining: - $x.Field1.Field2 - - The name of a key of the data, which must be a map, preceded - by a period, such as - .Key - The result is the map element value indexed by the key. - Key invocations may be chained and combined with fields to any - depth: - .Field1.Key1.Field2.Key2 - Although the key must be an alphanumeric identifier, unlike with - field names they do not need to start with an upper case letter. - Keys can also be evaluated on variables, including chaining: - $x.key1.key2 - - The name of a niladic method of the data, preceded by a period, - such as - .Method - The result is the value of invoking the method with dot as the - receiver, dot.Method(). Such a method must have one return value (of - any type) or two return values, the second of which is an error. - If it has two and the returned error is non-nil, execution terminates - and an error is returned to the caller as the value of Execute. - Method invocations may be chained and combined with fields and keys - to any depth: - .Field1.Key1.Method1.Field2.Key2.Method2 - Methods can also be evaluated on variables, including chaining: - $x.Method1.Field - - The name of a niladic function, such as - fun - The result is the value of invoking the function, fun(). The return - types and values behave as in methods. Functions and function - names are described below. - - A parenthesized instance of one the above, for grouping. The result - may be accessed by a field or map key invocation. - print (.F1 arg1) (.F2 arg2) - (.StructValuedMethod "arg").Field - -Arguments may evaluate to any type; if they are pointers the implementation -automatically indirects to the base type when required. 
-If an evaluation yields a function value, such as a function-valued -field of a struct, the function is not invoked automatically, but it -can be used as a truth value for an if action and the like. To invoke -it, use the call function, defined below. - -A pipeline is a possibly chained sequence of "commands". A command is a simple -value (argument) or a function or method call, possibly with multiple arguments: - - Argument - The result is the value of evaluating the argument. - .Method [Argument...] - The method can be alone or the last element of a chain but, - unlike methods in the middle of a chain, it can take arguments. - The result is the value of calling the method with the - arguments: - dot.Method(Argument1, etc.) - functionName [Argument...] - The result is the value of calling the function associated - with the name: - function(Argument1, etc.) - Functions and function names are described below. - -Pipelines - -A pipeline may be "chained" by separating a sequence of commands with pipeline -characters '|'. In a chained pipeline, the result of the each command is -passed as the last argument of the following command. The output of the final -command in the pipeline is the value of the pipeline. - -The output of a command will be either one value or two values, the second of -which has type error. If that second value is present and evaluates to -non-nil, execution terminates and the error is returned to the caller of -Execute. - -Variables - -A pipeline inside an action may initialize a variable to capture the result. -The initialization has syntax - - $variable := pipeline - -where $variable is the name of the variable. An action that declares a -variable produces no output. - -If a "range" action initializes a variable, the variable is set to the -successive elements of the iteration. Also, a "range" may declare two -variables, separated by a comma: - - range $index, $element := pipeline - -in which case $index and $element are set to the successive values of the -array/slice index or map key and element, respectively. Note that if there is -only one variable, it is assigned the element; this is opposite to the -convention in Go range clauses. - -A variable's scope extends to the "end" action of the control structure ("if", -"with", or "range") in which it is declared, or to the end of the template if -there is no such control structure. A template invocation does not inherit -variables from the point of its invocation. - -When execution begins, $ is set to the data argument passed to Execute, that is, -to the starting value of dot. - -Examples - -Here are some example one-line templates demonstrating pipelines and variables. -All produce the quoted word "output": - - {{"\"output\""}} - A string constant. - {{`"output"`}} - A raw string constant. - {{printf "%q" "output"}} - A function call. - {{"output" | printf "%q"}} - A function call whose final argument comes from the previous - command. - {{printf "%q" (print "out" "put")}} - A parenthesized argument. - {{"put" | printf "%s%s" "out" | printf "%q"}} - A more elaborate call. - {{"output" | printf "%s" | printf "%q"}} - A longer chain. - {{with "output"}}{{printf "%q" .}}{{end}} - A with action using dot. - {{with $x := "output" | printf "%q"}}{{$x}}{{end}} - A with action that creates and uses a variable. - {{with $x := "output"}}{{printf "%q" $x}}{{end}} - A with action that uses the variable in another action. - {{with $x := "output"}}{{$x | printf "%q"}}{{end}} - The same, but pipelined. 
- -Functions - -During execution functions are found in two function maps: first in the -template, then in the global function map. By default, no functions are defined -in the template but the Funcs method can be used to add them. - -Predefined global functions are named as follows. - - and - Returns the boolean AND of its arguments by returning the - first empty argument or the last argument, that is, - "and x y" behaves as "if x then y else x". All the - arguments are evaluated. - call - Returns the result of calling the first argument, which - must be a function, with the remaining arguments as parameters. - Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where - Y is a func-valued field, map entry, or the like. - The first argument must be the result of an evaluation - that yields a value of function type (as distinct from - a predefined function such as print). The function must - return either one or two result values, the second of which - is of type error. If the arguments don't match the function - or the returned error value is non-nil, execution stops. - html - Returns the escaped HTML equivalent of the textual - representation of its arguments. - index - Returns the result of indexing its first argument by the - following arguments. Thus "index x 1 2 3" is, in Go syntax, - x[1][2][3]. Each indexed item must be a map, slice, or array. - js - Returns the escaped JavaScript equivalent of the textual - representation of its arguments. - len - Returns the integer length of its argument. - not - Returns the boolean negation of its single argument. - or - Returns the boolean OR of its arguments by returning the - first non-empty argument or the last argument, that is, - "or x y" behaves as "if x then x else y". All the - arguments are evaluated. - print - An alias for fmt.Sprint - printf - An alias for fmt.Sprintf - println - An alias for fmt.Sprintln - urlquery - Returns the escaped value of the textual representation of - its arguments in a form suitable for embedding in a URL query. - -The boolean functions take any zero value to be false and a non-zero -value to be true. - -There is also a set of binary comparison operators defined as -functions: - - eq - Returns the boolean truth of arg1 == arg2 - ne - Returns the boolean truth of arg1 != arg2 - lt - Returns the boolean truth of arg1 < arg2 - le - Returns the boolean truth of arg1 <= arg2 - gt - Returns the boolean truth of arg1 > arg2 - ge - Returns the boolean truth of arg1 >= arg2 - -For simpler multi-way equality tests, eq (only) accepts two or more -arguments and compares the second and subsequent to the first, -returning in effect - - arg1==arg2 || arg1==arg3 || arg1==arg4 ... - -(Unlike with || in Go, however, eq is a function call and all the -arguments will be evaluated.) - -The comparison functions work on basic types only (or named basic -types, such as "type Celsius float32"). They implement the Go rules -for comparison of values, except that size and exact type are -ignored, so any integer value, signed or unsigned, may be compared -with any other integer value. (The arithmetic value is compared, -not the bit pattern, so all negative integers are less than all -unsigned integers.) However, as usual, one may not compare an int -with a float32 and so on. - -Associated templates - -Each template is named by a string specified when it is created. Also, each -template is associated with zero or more other templates that it may invoke by -name; such associations are transitive and form a name space of templates. 
- -A template may use a template invocation to instantiate another associated -template; see the explanation of the "template" action above. The name must be -that of a template associated with the template that contains the invocation. - -Nested template definitions - -When parsing a template, another template may be defined and associated with the -template being parsed. Template definitions must appear at the top level of the -template, much like global variables in a Go program. - -The syntax of such definitions is to surround each template declaration with a -"define" and "end" action. - -The define action names the template being created by providing a string -constant. Here is a simple example: - - `{{define "T1"}}ONE{{end}} - {{define "T2"}}TWO{{end}} - {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}} - {{template "T3"}}` - -This defines two templates, T1 and T2, and a third T3 that invokes the other two -when it is executed. Finally it invokes T3. If executed this template will -produce the text - - ONE TWO - -By construction, a template may reside in only one association. If it's -necessary to have a template addressable from multiple associations, the -template definition must be parsed multiple times to create distinct *Template -values, or must be copied with the Clone or AddParseTree method. - -Parse may be called multiple times to assemble the various associated templates; -see the ParseFiles and ParseGlob functions and methods for simple ways to parse -related templates stored in files. - -A template may be executed directly or through ExecuteTemplate, which executes -an associated template identified by name. To invoke our example above, we -might write, - - err := tmpl.Execute(os.Stdout, "no data needed") - if err != nil { - log.Fatalf("execution failed: %s", err) - } - -or to invoke a particular template explicitly by name, - - err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed") - if err != nil { - log.Fatalf("execution failed: %s", err) - } - -*/ -package template diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/example_test.go b/Godeps/_workspace/src/github.com/alecthomas/template/example_test.go deleted file mode 100644 index 170d706e3..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/example_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template_test - -import ( - "github.com/alecthomas/template" - "log" - "os" -) - -func ExampleTemplate() { - // Define a template. - const letter = ` -Dear {{.Name}}, -{{if .Attended}} -It was a pleasure to see you at the wedding.{{else}} -It is a shame you couldn't make it to the wedding.{{end}} -{{with .Gift}}Thank you for the lovely {{.}}. -{{end}} -Best wishes, -Josie -` - - // Prepare some data to insert into the template. - type Recipient struct { - Name, Gift string - Attended bool - } - var recipients = []Recipient{ - {"Aunt Mildred", "bone china tea set", true}, - {"Uncle John", "moleskin pants", false}, - {"Cousin Rodney", "", false}, - } - - // Create a new template and parse the letter into it. - t := template.Must(template.New("letter").Parse(letter)) - - // Execute the template for each recipient. 
- for _, r := range recipients { - err := t.Execute(os.Stdout, r) - if err != nil { - log.Println("executing template:", err) - } - } - - // Output: - // Dear Aunt Mildred, - // - // It was a pleasure to see you at the wedding. - // Thank you for the lovely bone china tea set. - // - // Best wishes, - // Josie - // - // Dear Uncle John, - // - // It is a shame you couldn't make it to the wedding. - // Thank you for the lovely moleskin pants. - // - // Best wishes, - // Josie - // - // Dear Cousin Rodney, - // - // It is a shame you couldn't make it to the wedding. - // - // Best wishes, - // Josie -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/examplefiles_test.go b/Godeps/_workspace/src/github.com/alecthomas/template/examplefiles_test.go deleted file mode 100644 index c1540739e..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/examplefiles_test.go +++ /dev/null @@ -1,182 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template_test - -import ( - "github.com/alecthomas/template" - "io" - "io/ioutil" - "log" - "os" - "path/filepath" -) - -// templateFile defines the contents of a template to be stored in a file, for testing. -type templateFile struct { - name string - contents string -} - -func createTestDir(files []templateFile) string { - dir, err := ioutil.TempDir("", "template") - if err != nil { - log.Fatal(err) - } - for _, file := range files { - f, err := os.Create(filepath.Join(dir, file.name)) - if err != nil { - log.Fatal(err) - } - defer f.Close() - _, err = io.WriteString(f, file.contents) - if err != nil { - log.Fatal(err) - } - } - return dir -} - -// Here we demonstrate loading a set of templates from a directory. -func ExampleTemplate_glob() { - // Here we create a temporary directory and populate it with our sample - // template definition files; usually the template files would already - // exist in some location known to the program. - dir := createTestDir([]templateFile{ - // T0.tmpl is a plain template file that just invokes T1. - {"T0.tmpl", `T0 invokes T1: ({{template "T1"}})`}, - // T1.tmpl defines a template, T1 that invokes T2. - {"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`}, - // T2.tmpl defines a template T2. - {"T2.tmpl", `{{define "T2"}}This is T2{{end}}`}, - }) - // Clean up after the test; another quirk of running as an example. - defer os.RemoveAll(dir) - - // pattern is the glob pattern used to find all the template files. - pattern := filepath.Join(dir, "*.tmpl") - - // Here starts the example proper. - // T0.tmpl is the first name matched, so it becomes the starting template, - // the value returned by ParseGlob. - tmpl := template.Must(template.ParseGlob(pattern)) - - err := tmpl.Execute(os.Stdout, nil) - if err != nil { - log.Fatalf("template execution: %s", err) - } - // Output: - // T0 invokes T1: (T1 invokes T2: (This is T2)) -} - -// This example demonstrates one way to share some templates -// and use them in different contexts. In this variant we add multiple driver -// templates by hand to an existing bundle of templates. -func ExampleTemplate_helpers() { - // Here we create a temporary directory and populate it with our sample - // template definition files; usually the template files would already - // exist in some location known to the program. - dir := createTestDir([]templateFile{ - // T1.tmpl defines a template, T1 that invokes T2. 
- {"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`}, - // T2.tmpl defines a template T2. - {"T2.tmpl", `{{define "T2"}}This is T2{{end}}`}, - }) - // Clean up after the test; another quirk of running as an example. - defer os.RemoveAll(dir) - - // pattern is the glob pattern used to find all the template files. - pattern := filepath.Join(dir, "*.tmpl") - - // Here starts the example proper. - // Load the helpers. - templates := template.Must(template.ParseGlob(pattern)) - // Add one driver template to the bunch; we do this with an explicit template definition. - _, err := templates.Parse("{{define `driver1`}}Driver 1 calls T1: ({{template `T1`}})\n{{end}}") - if err != nil { - log.Fatal("parsing driver1: ", err) - } - // Add another driver template. - _, err = templates.Parse("{{define `driver2`}}Driver 2 calls T2: ({{template `T2`}})\n{{end}}") - if err != nil { - log.Fatal("parsing driver2: ", err) - } - // We load all the templates before execution. This package does not require - // that behavior but html/template's escaping does, so it's a good habit. - err = templates.ExecuteTemplate(os.Stdout, "driver1", nil) - if err != nil { - log.Fatalf("driver1 execution: %s", err) - } - err = templates.ExecuteTemplate(os.Stdout, "driver2", nil) - if err != nil { - log.Fatalf("driver2 execution: %s", err) - } - // Output: - // Driver 1 calls T1: (T1 invokes T2: (This is T2)) - // Driver 2 calls T2: (This is T2) -} - -// This example demonstrates how to use one group of driver -// templates with distinct sets of helper templates. -func ExampleTemplate_share() { - // Here we create a temporary directory and populate it with our sample - // template definition files; usually the template files would already - // exist in some location known to the program. - dir := createTestDir([]templateFile{ - // T0.tmpl is a plain template file that just invokes T1. - {"T0.tmpl", "T0 ({{.}} version) invokes T1: ({{template `T1`}})\n"}, - // T1.tmpl defines a template, T1 that invokes T2. Note T2 is not defined - {"T1.tmpl", `{{define "T1"}}T1 invokes T2: ({{template "T2"}}){{end}}`}, - }) - // Clean up after the test; another quirk of running as an example. - defer os.RemoveAll(dir) - - // pattern is the glob pattern used to find all the template files. - pattern := filepath.Join(dir, "*.tmpl") - - // Here starts the example proper. - // Load the drivers. - drivers := template.Must(template.ParseGlob(pattern)) - - // We must define an implementation of the T2 template. First we clone - // the drivers, then add a definition of T2 to the template name space. - - // 1. Clone the helper set to create a new name space from which to run them. - first, err := drivers.Clone() - if err != nil { - log.Fatal("cloning helpers: ", err) - } - // 2. Define T2, version A, and parse it. - _, err = first.Parse("{{define `T2`}}T2, version A{{end}}") - if err != nil { - log.Fatal("parsing T2: ", err) - } - - // Now repeat the whole thing, using a different version of T2. - // 1. Clone the drivers. - second, err := drivers.Clone() - if err != nil { - log.Fatal("cloning drivers: ", err) - } - // 2. Define T2, version B, and parse it. - _, err = second.Parse("{{define `T2`}}T2, version B{{end}}") - if err != nil { - log.Fatal("parsing T2: ", err) - } - - // Execute the templates in the reverse order to verify the - // first is unaffected by the second. 
- err = second.ExecuteTemplate(os.Stdout, "T0.tmpl", "second") - if err != nil { - log.Fatalf("second execution: %s", err) - } - err = first.ExecuteTemplate(os.Stdout, "T0.tmpl", "first") - if err != nil { - log.Fatalf("first: execution: %s", err) - } - - // Output: - // T0 (second version) invokes T1: (T1 invokes T2: (T2, version B)) - // T0 (first version) invokes T1: (T1 invokes T2: (T2, version A)) -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/examplefunc_test.go b/Godeps/_workspace/src/github.com/alecthomas/template/examplefunc_test.go deleted file mode 100644 index 4f38f8d6f..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/examplefunc_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template_test - -import ( - "github.com/alecthomas/template" - "log" - "os" - "strings" -) - -// This example demonstrates a custom function to process template text. -// It installs the strings.Title function and uses it to -// Make Title Text Look Good In Our Template's Output. -func ExampleTemplate_func() { - // First we create a FuncMap with which to register the function. - funcMap := template.FuncMap{ - // The name "title" is what the function will be called in the template text. - "title": strings.Title, - } - - // A simple template definition to test our function. - // We print the input text several ways: - // - the original - // - title-cased - // - title-cased and then printed with %q - // - printed with %q and then title-cased. - const templateText = ` -Input: {{printf "%q" .}} -Output 0: {{title .}} -Output 1: {{title . | printf "%q"}} -Output 2: {{printf "%q" . | title}} -` - - // Create a template, add the function map, and parse the text. - tmpl, err := template.New("titleTest").Funcs(funcMap).Parse(templateText) - if err != nil { - log.Fatalf("parsing: %s", err) - } - - // Run the template to verify the output. - err = tmpl.Execute(os.Stdout, "the go programming language") - if err != nil { - log.Fatalf("execution: %s", err) - } - - // Output: - // Input: "the go programming language" - // Output 0: The Go Programming Language - // Output 1: "The Go Programming Language" - // Output 2: "The Go Programming Language" -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/exec.go b/Godeps/_workspace/src/github.com/alecthomas/template/exec.go deleted file mode 100644 index 8e0413157..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/exec.go +++ /dev/null @@ -1,844 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "fmt" - "github.com/alecthomas/template/parse" - "io" - "reflect" - "runtime" - "sort" - "strings" -) - -// state represents the state of an execution. It's not part of the -// template so that multiple executions of the same template -// can execute in parallel. -type state struct { - tmpl *Template - wr io.Writer - node parse.Node // current node, for errors - vars []variable // push-down stack of variable values. -} - -// variable holds the dynamic value of a variable such as $, $x etc. -type variable struct { - name string - value reflect.Value -} - -// push pushes a new variable on the stack. 
-func (s *state) push(name string, value reflect.Value) { - s.vars = append(s.vars, variable{name, value}) -} - -// mark returns the length of the variable stack. -func (s *state) mark() int { - return len(s.vars) -} - -// pop pops the variable stack up to the mark. -func (s *state) pop(mark int) { - s.vars = s.vars[0:mark] -} - -// setVar overwrites the top-nth variable on the stack. Used by range iterations. -func (s *state) setVar(n int, value reflect.Value) { - s.vars[len(s.vars)-n].value = value -} - -// varValue returns the value of the named variable. -func (s *state) varValue(name string) reflect.Value { - for i := s.mark() - 1; i >= 0; i-- { - if s.vars[i].name == name { - return s.vars[i].value - } - } - s.errorf("undefined variable: %s", name) - return zero -} - -var zero reflect.Value - -// at marks the state to be on node n, for error reporting. -func (s *state) at(node parse.Node) { - s.node = node -} - -// doublePercent returns the string with %'s replaced by %%, if necessary, -// so it can be used safely inside a Printf format string. -func doublePercent(str string) string { - if strings.Contains(str, "%") { - str = strings.Replace(str, "%", "%%", -1) - } - return str -} - -// errorf formats the error and terminates processing. -func (s *state) errorf(format string, args ...interface{}) { - name := doublePercent(s.tmpl.Name()) - if s.node == nil { - format = fmt.Sprintf("template: %s: %s", name, format) - } else { - location, context := s.tmpl.ErrorContext(s.node) - format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format) - } - panic(fmt.Errorf(format, args...)) -} - -// errRecover is the handler that turns panics into returns from the top -// level of Parse. -func errRecover(errp *error) { - e := recover() - if e != nil { - switch err := e.(type) { - case runtime.Error: - panic(e) - case error: - *errp = err - default: - panic(e) - } - } -} - -// ExecuteTemplate applies the template associated with t that has the given name -// to the specified data object and writes the output to wr. -// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel. -func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error { - tmpl := t.tmpl[name] - if tmpl == nil { - return fmt.Errorf("template: no template %q associated with template %q", name, t.name) - } - return tmpl.Execute(wr, data) -} - -// Execute applies a parsed template to the specified data object, -// and writes the output to wr. -// If an error occurs executing the template or writing its output, -// execution stops, but partial results may already have been written to -// the output writer. -// A template may be executed safely in parallel. 
-func (t *Template) Execute(wr io.Writer, data interface{}) (err error) { - defer errRecover(&err) - value := reflect.ValueOf(data) - state := &state{ - tmpl: t, - wr: wr, - vars: []variable{{"$", value}}, - } - t.init() - if t.Tree == nil || t.Root == nil { - var b bytes.Buffer - for name, tmpl := range t.tmpl { - if tmpl.Tree == nil || tmpl.Root == nil { - continue - } - if b.Len() > 0 { - b.WriteString(", ") - } - fmt.Fprintf(&b, "%q", name) - } - var s string - if b.Len() > 0 { - s = "; defined templates are: " + b.String() - } - state.errorf("%q is an incomplete or empty template%s", t.Name(), s) - } - state.walk(value, t.Root) - return -} - -// Walk functions step through the major pieces of the template structure, -// generating output as they go. -func (s *state) walk(dot reflect.Value, node parse.Node) { - s.at(node) - switch node := node.(type) { - case *parse.ActionNode: - // Do not pop variables so they persist until next end. - // Also, if the action declares variables, don't print the result. - val := s.evalPipeline(dot, node.Pipe) - if len(node.Pipe.Decl) == 0 { - s.printValue(node, val) - } - case *parse.IfNode: - s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList) - case *parse.ListNode: - for _, node := range node.Nodes { - s.walk(dot, node) - } - case *parse.RangeNode: - s.walkRange(dot, node) - case *parse.TemplateNode: - s.walkTemplate(dot, node) - case *parse.TextNode: - if _, err := s.wr.Write(node.Text); err != nil { - s.errorf("%s", err) - } - case *parse.WithNode: - s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList) - default: - s.errorf("unknown node: %s", node) - } -} - -// walkIfOrWith walks an 'if' or 'with' node. The two control structures -// are identical in behavior except that 'with' sets dot. -func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) { - defer s.pop(s.mark()) - val := s.evalPipeline(dot, pipe) - truth, ok := isTrue(val) - if !ok { - s.errorf("if/with can't use %v", val) - } - if truth { - if typ == parse.NodeWith { - s.walk(val, list) - } else { - s.walk(dot, list) - } - } else if elseList != nil { - s.walk(dot, elseList) - } -} - -// isTrue reports whether the value is 'true', in the sense of not the zero of its type, -// and whether the value has a meaningful truth value. -func isTrue(val reflect.Value) (truth, ok bool) { - if !val.IsValid() { - // Something like var x interface{}, never set. It's a form of nil. - return false, true - } - switch val.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - truth = val.Len() > 0 - case reflect.Bool: - truth = val.Bool() - case reflect.Complex64, reflect.Complex128: - truth = val.Complex() != 0 - case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface: - truth = !val.IsNil() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - truth = val.Int() != 0 - case reflect.Float32, reflect.Float64: - truth = val.Float() != 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - truth = val.Uint() != 0 - case reflect.Struct: - truth = true // Struct values are always true. - default: - return - } - return truth, true -} - -func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) { - s.at(r) - defer s.pop(s.mark()) - val, _ := indirect(s.evalPipeline(dot, r.Pipe)) - // mark top of stack before any variables in the body are pushed. 
- mark := s.mark() - oneIteration := func(index, elem reflect.Value) { - // Set top var (lexically the second if there are two) to the element. - if len(r.Pipe.Decl) > 0 { - s.setVar(1, elem) - } - // Set next var (lexically the first if there are two) to the index. - if len(r.Pipe.Decl) > 1 { - s.setVar(2, index) - } - s.walk(elem, r.List) - s.pop(mark) - } - switch val.Kind() { - case reflect.Array, reflect.Slice: - if val.Len() == 0 { - break - } - for i := 0; i < val.Len(); i++ { - oneIteration(reflect.ValueOf(i), val.Index(i)) - } - return - case reflect.Map: - if val.Len() == 0 { - break - } - for _, key := range sortKeys(val.MapKeys()) { - oneIteration(key, val.MapIndex(key)) - } - return - case reflect.Chan: - if val.IsNil() { - break - } - i := 0 - for ; ; i++ { - elem, ok := val.Recv() - if !ok { - break - } - oneIteration(reflect.ValueOf(i), elem) - } - if i == 0 { - break - } - return - case reflect.Invalid: - break // An invalid value is likely a nil map, etc. and acts like an empty map. - default: - s.errorf("range can't iterate over %v", val) - } - if r.ElseList != nil { - s.walk(dot, r.ElseList) - } -} - -func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) { - s.at(t) - tmpl := s.tmpl.tmpl[t.Name] - if tmpl == nil { - s.errorf("template %q not defined", t.Name) - } - // Variables declared by the pipeline persist. - dot = s.evalPipeline(dot, t.Pipe) - newState := *s - newState.tmpl = tmpl - // No dynamic scoping: template invocations inherit no variables. - newState.vars = []variable{{"$", dot}} - newState.walk(dot, tmpl.Root) -} - -// Eval functions evaluate pipelines, commands, and their elements and extract -// values from the data structure by examining fields, calling methods, and so on. -// The printing of those values happens only through walk functions. - -// evalPipeline returns the value acquired by evaluating a pipeline. If the -// pipeline has a variable declaration, the variable will be pushed on the -// stack. Callers should therefore pop the stack after they are finished -// executing commands depending on the pipeline value. -func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) { - if pipe == nil { - return - } - s.at(pipe) - for _, cmd := range pipe.Cmds { - value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg. - // If the object has type interface{}, dig down one level to the thing inside. - if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 { - value = reflect.ValueOf(value.Interface()) // lovely! - } - } - for _, variable := range pipe.Decl { - s.push(variable.Ident[0], value) - } - return value -} - -func (s *state) notAFunction(args []parse.Node, final reflect.Value) { - if len(args) > 1 || final.IsValid() { - s.errorf("can't give argument to non-function %s", args[0]) - } -} - -func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value { - firstWord := cmd.Args[0] - switch n := firstWord.(type) { - case *parse.FieldNode: - return s.evalFieldNode(dot, n, cmd.Args, final) - case *parse.ChainNode: - return s.evalChainNode(dot, n, cmd.Args, final) - case *parse.IdentifierNode: - // Must be a function. - return s.evalFunction(dot, n, cmd, cmd.Args, final) - case *parse.PipeNode: - // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored. 
- return s.evalPipeline(dot, n) - case *parse.VariableNode: - return s.evalVariableNode(dot, n, cmd.Args, final) - } - s.at(firstWord) - s.notAFunction(cmd.Args, final) - switch word := firstWord.(type) { - case *parse.BoolNode: - return reflect.ValueOf(word.True) - case *parse.DotNode: - return dot - case *parse.NilNode: - s.errorf("nil is not a command") - case *parse.NumberNode: - return s.idealConstant(word) - case *parse.StringNode: - return reflect.ValueOf(word.Text) - } - s.errorf("can't evaluate command %q", firstWord) - panic("not reached") -} - -// idealConstant is called to return the value of a number in a context where -// we don't know the type. In that case, the syntax of the number tells us -// its type, and we use Go rules to resolve. Note there is no such thing as -// a uint ideal constant in this situation - the value must be of int type. -func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value { - // These are ideal constants but we don't know the type - // and we have no context. (If it was a method argument, - // we'd know what we need.) The syntax guides us to some extent. - s.at(constant) - switch { - case constant.IsComplex: - return reflect.ValueOf(constant.Complex128) // incontrovertible. - case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0: - return reflect.ValueOf(constant.Float64) - case constant.IsInt: - n := int(constant.Int64) - if int64(n) != constant.Int64 { - s.errorf("%s overflows int", constant.Text) - } - return reflect.ValueOf(n) - case constant.IsUint: - s.errorf("%s overflows int", constant.Text) - } - return zero -} - -func isHexConstant(s string) bool { - return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') -} - -func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value { - s.at(field) - return s.evalFieldChain(dot, dot, field, field.Ident, args, final) -} - -func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value { - s.at(chain) - // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields. - pipe := s.evalArg(dot, nil, chain.Node) - if len(chain.Field) == 0 { - s.errorf("internal error: no fields in evalChainNode") - } - return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final) -} - -func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value { - // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields. - s.at(variable) - value := s.varValue(variable.Ident[0]) - if len(variable.Ident) == 1 { - s.notAFunction(args, final) - return value - } - return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final) -} - -// evalFieldChain evaluates .X.Y.Z possibly followed by arguments. -// dot is the environment in which to evaluate arguments, while -// receiver is the value being walked along the chain. -func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value { - n := len(ident) - for i := 0; i < n-1; i++ { - receiver = s.evalField(dot, ident[i], node, nil, zero, receiver) - } - // Now if it's a method, it gets the arguments. 
- return s.evalField(dot, ident[n-1], node, args, final, receiver) -} - -func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value { - s.at(node) - name := node.Ident - function, ok := findFunction(name, s.tmpl) - if !ok { - s.errorf("%q is not a defined function", name) - } - return s.evalCall(dot, function, cmd, name, args, final) -} - -// evalField evaluates an expression like (.Field) or (.Field arg1 arg2). -// The 'final' argument represents the return value from the preceding -// value of the pipeline, if any. -func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value { - if !receiver.IsValid() { - return zero - } - typ := receiver.Type() - receiver, _ = indirect(receiver) - // Unless it's an interface, need to get to a value of type *T to guarantee - // we see all methods of T and *T. - ptr := receiver - if ptr.Kind() != reflect.Interface && ptr.CanAddr() { - ptr = ptr.Addr() - } - if method := ptr.MethodByName(fieldName); method.IsValid() { - return s.evalCall(dot, method, node, fieldName, args, final) - } - hasArgs := len(args) > 1 || final.IsValid() - // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil. - receiver, isNil := indirect(receiver) - if isNil { - s.errorf("nil pointer evaluating %s.%s", typ, fieldName) - } - switch receiver.Kind() { - case reflect.Struct: - tField, ok := receiver.Type().FieldByName(fieldName) - if ok { - field := receiver.FieldByIndex(tField.Index) - if tField.PkgPath != "" { // field is unexported - s.errorf("%s is an unexported field of struct type %s", fieldName, typ) - } - // If it's a function, we must call it. - if hasArgs { - s.errorf("%s has arguments but cannot be invoked as function", fieldName) - } - return field - } - s.errorf("%s is not a field of struct type %s", fieldName, typ) - case reflect.Map: - // If it's a map, attempt to use the field name as a key. - nameVal := reflect.ValueOf(fieldName) - if nameVal.Type().AssignableTo(receiver.Type().Key()) { - if hasArgs { - s.errorf("%s is not a method but has arguments", fieldName) - } - return receiver.MapIndex(nameVal) - } - } - s.errorf("can't evaluate field %s in type %s", fieldName, typ) - panic("not reached") -} - -var ( - errorType = reflect.TypeOf((*error)(nil)).Elem() - fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem() -) - -// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so -// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0] -// as the function itself. -func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value { - if args != nil { - args = args[1:] // Zeroth arg is function name/node; not passed to function. - } - typ := fun.Type() - numIn := len(args) - if final.IsValid() { - numIn++ - } - numFixed := len(args) - if typ.IsVariadic() { - numFixed = typ.NumIn() - 1 // last arg is the variadic one. 
- if numIn < numFixed { - s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args)) - } - } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() { - s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args)) - } - if !goodFunc(typ) { - // TODO: This could still be a confusing error; maybe goodFunc should provide info. - s.errorf("can't call method/function %q with %d results", name, typ.NumOut()) - } - // Build the arg list. - argv := make([]reflect.Value, numIn) - // Args must be evaluated. Fixed args first. - i := 0 - for ; i < numFixed && i < len(args); i++ { - argv[i] = s.evalArg(dot, typ.In(i), args[i]) - } - // Now the ... args. - if typ.IsVariadic() { - argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice. - for ; i < len(args); i++ { - argv[i] = s.evalArg(dot, argType, args[i]) - } - } - // Add final value if necessary. - if final.IsValid() { - t := typ.In(typ.NumIn() - 1) - if typ.IsVariadic() { - t = t.Elem() - } - argv[i] = s.validateType(final, t) - } - result := fun.Call(argv) - // If we have an error that is not nil, stop execution and return that error to the caller. - if len(result) == 2 && !result[1].IsNil() { - s.at(node) - s.errorf("error calling %s: %s", name, result[1].Interface().(error)) - } - return result[0] -} - -// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero. -func canBeNil(typ reflect.Type) bool { - switch typ.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return true - } - return false -} - -// validateType guarantees that the value is valid and assignable to the type. -func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value { - if !value.IsValid() { - if typ == nil || canBeNil(typ) { - // An untyped nil interface{}. Accept as a proper nil value. - return reflect.Zero(typ) - } - s.errorf("invalid value; expected %s", typ) - } - if typ != nil && !value.Type().AssignableTo(typ) { - if value.Kind() == reflect.Interface && !value.IsNil() { - value = value.Elem() - if value.Type().AssignableTo(typ) { - return value - } - // fallthrough - } - // Does one dereference or indirection work? We could do more, as we - // do with method receivers, but that gets messy and method receivers - // are much more constrained, so it makes more sense there than here. - // Besides, one is almost always all you need. 
- switch { - case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ): - value = value.Elem() - if !value.IsValid() { - s.errorf("dereference of nil pointer of type %s", typ) - } - case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr(): - value = value.Addr() - default: - s.errorf("wrong type for value; expected %s; got %s", typ, value.Type()) - } - } - return value -} - -func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - switch arg := n.(type) { - case *parse.DotNode: - return s.validateType(dot, typ) - case *parse.NilNode: - if canBeNil(typ) { - return reflect.Zero(typ) - } - s.errorf("cannot assign nil to %s", typ) - case *parse.FieldNode: - return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ) - case *parse.VariableNode: - return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ) - case *parse.PipeNode: - return s.validateType(s.evalPipeline(dot, arg), typ) - case *parse.IdentifierNode: - return s.evalFunction(dot, arg, arg, nil, zero) - case *parse.ChainNode: - return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ) - } - switch typ.Kind() { - case reflect.Bool: - return s.evalBool(typ, n) - case reflect.Complex64, reflect.Complex128: - return s.evalComplex(typ, n) - case reflect.Float32, reflect.Float64: - return s.evalFloat(typ, n) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return s.evalInteger(typ, n) - case reflect.Interface: - if typ.NumMethod() == 0 { - return s.evalEmptyInterface(dot, n) - } - case reflect.String: - return s.evalString(typ, n) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return s.evalUnsignedInteger(typ, n) - } - s.errorf("can't handle %s for arg of type %s", n, typ) - panic("not reached") -} - -func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.BoolNode); ok { - value := reflect.New(typ).Elem() - value.SetBool(n.True) - return value - } - s.errorf("expected bool; found %s", n) - panic("not reached") -} - -func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.StringNode); ok { - value := reflect.New(typ).Elem() - value.SetString(n.Text) - return value - } - s.errorf("expected string; found %s", n) - panic("not reached") -} - -func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsInt { - value := reflect.New(typ).Elem() - value.SetInt(n.Int64) - return value - } - s.errorf("expected integer; found %s", n) - panic("not reached") -} - -func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsUint { - value := reflect.New(typ).Elem() - value.SetUint(n.Uint64) - return value - } - s.errorf("expected unsigned integer; found %s", n) - panic("not reached") -} - -func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value { - s.at(n) - if n, ok := n.(*parse.NumberNode); ok && n.IsFloat { - value := reflect.New(typ).Elem() - value.SetFloat(n.Float64) - return value - } - s.errorf("expected float; found %s", n) - panic("not reached") -} - -func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value { - if n, ok := n.(*parse.NumberNode); ok && n.IsComplex { - value := reflect.New(typ).Elem() - value.SetComplex(n.Complex128) - return value - } - 
s.errorf("expected complex; found %s", n) - panic("not reached") -} - -func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value { - s.at(n) - switch n := n.(type) { - case *parse.BoolNode: - return reflect.ValueOf(n.True) - case *parse.DotNode: - return dot - case *parse.FieldNode: - return s.evalFieldNode(dot, n, nil, zero) - case *parse.IdentifierNode: - return s.evalFunction(dot, n, n, nil, zero) - case *parse.NilNode: - // NilNode is handled in evalArg, the only place that calls here. - s.errorf("evalEmptyInterface: nil (can't happen)") - case *parse.NumberNode: - return s.idealConstant(n) - case *parse.StringNode: - return reflect.ValueOf(n.Text) - case *parse.VariableNode: - return s.evalVariableNode(dot, n, nil, zero) - case *parse.PipeNode: - return s.evalPipeline(dot, n) - } - s.errorf("can't handle assignment of %s to empty interface argument", n) - panic("not reached") -} - -// indirect returns the item at the end of indirection, and a bool to indicate if it's nil. -// We indirect through pointers and empty interfaces (only) because -// non-empty interfaces have methods we might need. -func indirect(v reflect.Value) (rv reflect.Value, isNil bool) { - for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() { - if v.IsNil() { - return v, true - } - if v.Kind() == reflect.Interface && v.NumMethod() > 0 { - break - } - } - return v, false -} - -// printValue writes the textual representation of the value to the output of -// the template. -func (s *state) printValue(n parse.Node, v reflect.Value) { - s.at(n) - iface, ok := printableValue(v) - if !ok { - s.errorf("can't print %s of type %s", n, v.Type()) - } - fmt.Fprint(s.wr, iface) -} - -// printableValue returns the, possibly indirected, interface value inside v that -// is best for a call to formatted printer. -func printableValue(v reflect.Value) (interface{}, bool) { - if v.Kind() == reflect.Ptr { - v, _ = indirect(v) // fmt.Fprint handles nil. - } - if !v.IsValid() { - return "", true - } - - if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) { - if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) { - v = v.Addr() - } else { - switch v.Kind() { - case reflect.Chan, reflect.Func: - return nil, false - } - } - } - return v.Interface(), true -} - -// Types to help sort the keys in a map for reproducible output. - -type rvs []reflect.Value - -func (x rvs) Len() int { return len(x) } -func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -type rvInts struct{ rvs } - -func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() } - -type rvUints struct{ rvs } - -func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() } - -type rvFloats struct{ rvs } - -func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() } - -type rvStrings struct{ rvs } - -func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() } - -// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys. 
-func sortKeys(v []reflect.Value) []reflect.Value { - if len(v) <= 1 { - return v - } - switch v[0].Kind() { - case reflect.Float32, reflect.Float64: - sort.Sort(rvFloats{v}) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - sort.Sort(rvInts{v}) - case reflect.String: - sort.Sort(rvStrings{v}) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - sort.Sort(rvUints{v}) - } - return v -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/exec_test.go b/Godeps/_workspace/src/github.com/alecthomas/template/exec_test.go deleted file mode 100644 index 69c213ed2..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/exec_test.go +++ /dev/null @@ -1,1044 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "errors" - "flag" - "fmt" - "reflect" - "strings" - "testing" -) - -var debug = flag.Bool("debug", false, "show the errors produced by the tests") - -// T has lots of interesting pieces to use to test execution. -type T struct { - // Basics - True bool - I int - U16 uint16 - X string - FloatZero float64 - ComplexZero complex128 - // Nested structs. - U *U - // Struct with String method. - V0 V - V1, V2 *V - // Struct with Error method. - W0 W - W1, W2 *W - // Slices - SI []int - SIEmpty []int - SB []bool - // Maps - MSI map[string]int - MSIone map[string]int // one element, for deterministic output - MSIEmpty map[string]int - MXI map[interface{}]int - MII map[int]int - SMSI []map[string]int - // Empty interfaces; used to see if we can dig inside one. - Empty0 interface{} // nil - Empty1 interface{} - Empty2 interface{} - Empty3 interface{} - Empty4 interface{} - // Non-empty interface. - NonEmptyInterface I - // Stringer. - Str fmt.Stringer - Err error - // Pointers - PI *int - PS *string - PSI *[]int - NIL *int - // Function (not method) - BinaryFunc func(string, string) string - VariadicFunc func(...string) string - VariadicFuncInt func(int, ...string) string - NilOKFunc func(*int) bool - ErrFunc func() (string, error) - // Template to test evaluation of templates. - Tmpl *Template - // Unexported field; cannot be accessed by template. 
- unexported int -} - -type U struct { - V string -} - -type V struct { - j int -} - -func (v *V) String() string { - if v == nil { - return "nilV" - } - return fmt.Sprintf("<%d>", v.j) -} - -type W struct { - k int -} - -func (w *W) Error() string { - if w == nil { - return "nilW" - } - return fmt.Sprintf("[%d]", w.k) -} - -var tVal = &T{ - True: true, - I: 17, - U16: 16, - X: "x", - U: &U{"v"}, - V0: V{6666}, - V1: &V{7777}, // leave V2 as nil - W0: W{888}, - W1: &W{999}, // leave W2 as nil - SI: []int{3, 4, 5}, - SB: []bool{true, false}, - MSI: map[string]int{"one": 1, "two": 2, "three": 3}, - MSIone: map[string]int{"one": 1}, - MXI: map[interface{}]int{"one": 1}, - MII: map[int]int{1: 1}, - SMSI: []map[string]int{ - {"one": 1, "two": 2}, - {"eleven": 11, "twelve": 12}, - }, - Empty1: 3, - Empty2: "empty2", - Empty3: []int{7, 8}, - Empty4: &U{"UinEmpty"}, - NonEmptyInterface: new(T), - Str: bytes.NewBuffer([]byte("foozle")), - Err: errors.New("erroozle"), - PI: newInt(23), - PS: newString("a string"), - PSI: newIntSlice(21, 22, 23), - BinaryFunc: func(a, b string) string { return fmt.Sprintf("[%s=%s]", a, b) }, - VariadicFunc: func(s ...string) string { return fmt.Sprint("<", strings.Join(s, "+"), ">") }, - VariadicFuncInt: func(a int, s ...string) string { return fmt.Sprint(a, "=<", strings.Join(s, "+"), ">") }, - NilOKFunc: func(s *int) bool { return s == nil }, - ErrFunc: func() (string, error) { return "bla", nil }, - Tmpl: Must(New("x").Parse("test template")), // "x" is the value of .X -} - -// A non-empty interface. -type I interface { - Method0() string -} - -var iVal I = tVal - -// Helpers for creation. -func newInt(n int) *int { - return &n -} - -func newString(s string) *string { - return &s -} - -func newIntSlice(n ...int) *[]int { - p := new([]int) - *p = make([]int, len(n)) - copy(*p, n) - return p -} - -// Simple methods with and without arguments. -func (t *T) Method0() string { - return "M0" -} - -func (t *T) Method1(a int) int { - return a -} - -func (t *T) Method2(a uint16, b string) string { - return fmt.Sprintf("Method2: %d %s", a, b) -} - -func (t *T) Method3(v interface{}) string { - return fmt.Sprintf("Method3: %v", v) -} - -func (t *T) Copy() *T { - n := new(T) - *n = *t - return n -} - -func (t *T) MAdd(a int, b []int) []int { - v := make([]int, len(b)) - for i, x := range b { - v[i] = x + a - } - return v -} - -var myError = errors.New("my error") - -// MyError returns a value and an error according to its argument. -func (t *T) MyError(error bool) (bool, error) { - if error { - return true, myError - } - return false, nil -} - -// A few methods to test chaining. -func (t *T) GetU() *U { - return t.U -} - -func (u *U) TrueFalse(b bool) string { - if b { - return "true" - } - return "" -} - -func typeOf(arg interface{}) string { - return fmt.Sprintf("%T", arg) -} - -type execTest struct { - name string - input string - output string - data interface{} - ok bool -} - -// bigInt and bigUint are hex string representing numbers either side -// of the max int boundary. -// We do it this way so the test doesn't depend on ints being 32 bits. -var ( - bigInt = fmt.Sprintf("0x%x", int(1<", tVal, true}, - {"map .one interface", "{{.MXI.one}}", "1", tVal, true}, - {"map .WRONG args", "{{.MSI.one 1}}", "", tVal, false}, - {"map .WRONG type", "{{.MII.one}}", "", tVal, false}, - - // Dots of all kinds to test basic evaluation. 
-	{"dot int", "<{{.}}>", "<13>", 13, true},
-	{"dot uint", "<{{.}}>", "<14>", uint(14), true},
-	{"dot float", "<{{.}}>", "<15.1>", 15.1, true},
-	{"dot bool", "<{{.}}>", "<true>", true, true},
-	{"dot complex", "<{{.}}>", "<(16.2-17i)>", 16.2 - 17i, true},
-	{"dot string", "<{{.}}>", "<hello>", "hello", true},
-	{"dot slice", "<{{.}}>", "<[-1 -2 -3]>", []int{-1, -2, -3}, true},
-	{"dot map", "<{{.}}>", "<map[two:22]>", map[string]int{"two": 22}, true},
-	{"dot struct", "<{{.}}>", "<{7 seven}>", struct {
-		a int
-		b string
-	}{7, "seven"}, true},
-
-	// Variables.
-	{"$ int", "{{$}}", "123", 123, true},
-	{"$.I", "{{$.I}}", "17", tVal, true},
-	{"$.U.V", "{{$.U.V}}", "v", tVal, true},
-	{"declare in action", "{{$x := $.U.V}}{{$x}}", "v", tVal, true},
-
-	// Type with String method.
-	{"V{6666}.String()", "-{{.V0}}-", "-<6666>-", tVal, true},
-	{"&V{7777}.String()", "-{{.V1}}-", "-<7777>-", tVal, true},
-	{"(*V)(nil).String()", "-{{.V2}}-", "-nilV-", tVal, true},
-
-	// Type with Error method.
-	{"W{888}.Error()", "-{{.W0}}-", "-[888]-", tVal, true},
-	{"&W{999}.Error()", "-{{.W1}}-", "-[999]-", tVal, true},
-	{"(*W)(nil).Error()", "-{{.W2}}-", "-nilW-", tVal, true},
-
-	// Pointers.
-	{"*int", "{{.PI}}", "23", tVal, true},
-	{"*string", "{{.PS}}", "a string", tVal, true},
-	{"*[]int", "{{.PSI}}", "[21 22 23]", tVal, true},
-	{"*[]int[1]", "{{index .PSI 1}}", "22", tVal, true},
-	{"NIL", "{{.NIL}}", "<nil>", tVal, true},
-
-	// Empty interfaces holding values.
-	{"empty nil", "{{.Empty0}}", "<no value>", tVal, true},
-	{"empty with int", "{{.Empty1}}", "3", tVal, true},
-	{"empty with string", "{{.Empty2}}", "empty2", tVal, true},
-	{"empty with slice", "{{.Empty3}}", "[7 8]", tVal, true},
-	{"empty with struct", "{{.Empty4}}", "{UinEmpty}", tVal, true},
-	{"empty with struct, field", "{{.Empty4.V}}", "UinEmpty", tVal, true},
-
-	// Method calls.
-	{".Method0", "-{{.Method0}}-", "-M0-", tVal, true},
-	{".Method1(1234)", "-{{.Method1 1234}}-", "-1234-", tVal, true},
-	{".Method1(.I)", "-{{.Method1 .I}}-", "-17-", tVal, true},
-	{".Method2(3, .X)", "-{{.Method2 3 .X}}-", "-Method2: 3 x-", tVal, true},
-	{".Method2(.U16, `str`)", "-{{.Method2 .U16 `str`}}-", "-Method2: 16 str-", tVal, true},
-	{".Method2(.U16, $x)", "{{if $x := .X}}-{{.Method2 .U16 $x}}{{end}}-", "-Method2: 16 x-", tVal, true},
-	{".Method3(nil constant)", "-{{.Method3 nil}}-", "-Method3: <nil>-", tVal, true},
-	{".Method3(nil value)", "-{{.Method3 .MXI.unset}}-", "-Method3: <nil>-", tVal, true},
-	{"method on var", "{{if $x := .}}-{{$x.Method2 .U16 $x.X}}{{end}}-", "-Method2: 16 x-", tVal, true},
-	{"method on chained var",
-		"{{range .MSIone}}{{if $.U.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
-		"true", tVal, true},
-	{"chained method",
-		"{{range .MSIone}}{{if $.GetU.TrueFalse $.True}}{{$.U.TrueFalse $.True}}{{else}}WRONG{{end}}{{end}}",
-		"true", tVal, true},
-	{"chained method on variable",
-		"{{with $x := .}}{{with .SI}}{{$.GetU.TrueFalse $.True}}{{end}}{{end}}",
-		"true", tVal, true},
-	{".NilOKFunc not nil", "{{call .NilOKFunc .PI}}", "false", tVal, true},
-	{".NilOKFunc nil", "{{call .NilOKFunc nil}}", "true", tVal, true},
-
-	// Function call builtin.
-	{".BinaryFunc", "{{call .BinaryFunc `1` `2`}}", "[1=2]", tVal, true},
-	{".VariadicFunc0", "{{call .VariadicFunc}}", "<>", tVal, true},
-	{".VariadicFunc2", "{{call .VariadicFunc `he` `llo`}}", "<he+llo>", tVal, true},
-	{".VariadicFuncInt", "{{call .VariadicFuncInt 33 `he` `llo`}}", "33=<he+llo>", tVal, true},
-	{"if .BinaryFunc call", "{{ if .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{end}}", "[1=2]", tVal, true},
-	{"if not .BinaryFunc call", "{{ if not .BinaryFunc}}{{call .BinaryFunc `1` `2`}}{{else}}No{{end}}", "No", tVal, true},
-	{"Interface Call", `{{stringer .S}}`, "foozle", map[string]interface{}{"S": bytes.NewBufferString("foozle")}, true},
-	{".ErrFunc", "{{call .ErrFunc}}", "bla", tVal, true},
-
-	// Erroneous function calls (check args).
-	{".BinaryFuncTooFew", "{{call .BinaryFunc `1`}}", "", tVal, false},
-	{".BinaryFuncTooMany", "{{call .BinaryFunc `1` `2` `3`}}", "", tVal, false},
-	{".BinaryFuncBad0", "{{call .BinaryFunc 1 3}}", "", tVal, false},
-	{".BinaryFuncBad1", "{{call .BinaryFunc `1` 3}}", "", tVal, false},
-	{".VariadicFuncBad0", "{{call .VariadicFunc 3}}", "", tVal, false},
-	{".VariadicFuncIntBad0", "{{call .VariadicFuncInt}}", "", tVal, false},
-	{".VariadicFuncIntBad`", "{{call .VariadicFuncInt `x`}}", "", tVal, false},
-	{".VariadicFuncNilBad", "{{call .VariadicFunc nil}}", "", tVal, false},
-
-	// Pipelines.
-	{"pipeline", "-{{.Method0 | .Method2 .U16}}-", "-Method2: 16 M0-", tVal, true},
-	{"pipeline func", "-{{call .VariadicFunc `llo` | call .VariadicFunc `he` }}-", "-<he+<llo>>-", tVal, true},
-
-	// Parenthesized expressions
-	{"parens in pipeline", "{{printf `%d %d %d` (1) (2 | add 3) (add 4 (add 5 6))}}", "1 5 15", tVal, true},
-
-	// Parenthesized expressions with field accesses
-	{"parens: $ in paren", "{{($).X}}", "x", tVal, true},
-	{"parens: $.GetU in paren", "{{($.GetU).V}}", "v", tVal, true},
-	{"parens: $ in paren in pipe", "{{($ | echo).X}}", "x", tVal, true},
-	{"parens: spaces and args", `{{(makemap "up" "down" "left" "right").left}}`, "right", tVal, true},
-
-	// If.
-	{"if true", "{{if true}}TRUE{{end}}", "TRUE", tVal, true},
-	{"if false", "{{if false}}TRUE{{else}}FALSE{{end}}", "FALSE", tVal, true},
-	{"if nil", "{{if nil}}TRUE{{end}}", "", tVal, false},
-	{"if 1", "{{if 1}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
-	{"if 0", "{{if 0}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if 1.5", "{{if 1.5}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
-	{"if 0.0", "{{if .FloatZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if 1.5i", "{{if 1.5i}}NON-ZERO{{else}}ZERO{{end}}", "NON-ZERO", tVal, true},
-	{"if 0.0i", "{{if .ComplexZero}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if emptystring", "{{if ``}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"if string", "{{if `notempty`}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
-	{"if emptyslice", "{{if .SIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"if slice", "{{if .SI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
-	{"if emptymap", "{{if .MSIEmpty}}NON-EMPTY{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"if map", "{{if .MSI}}NON-EMPTY{{else}}EMPTY{{end}}", "NON-EMPTY", tVal, true},
-	{"if map unset", "{{if .MXI.none}}NON-ZERO{{else}}ZERO{{end}}", "ZERO", tVal, true},
-	{"if map not unset", "{{if not .MXI.none}}ZERO{{else}}NON-ZERO{{end}}", "ZERO", tVal, true},
-	{"if $x with $y int", "{{if $x := true}}{{with $y := .I}}{{$x}},{{$y}}{{end}}{{end}}", "true,17", tVal, true},
-	{"if $x with $x int", "{{if $x := true}}{{with $x := .I}}{{$x}},{{end}}{{$x}}{{end}}", "17,true", tVal, true},
-	{"if else if", "{{if false}}FALSE{{else if true}}TRUE{{end}}", "TRUE", tVal, true},
-	{"if else chain", "{{if eq 1 3}}1{{else if eq 2 3}}2{{else if eq 3 3}}3{{end}}", "3", tVal, true},
-
-	// Print etc.
-	{"print", `{{print "hello, print"}}`, "hello, print", tVal, true},
-	{"print 123", `{{print 1 2 3}}`, "1 2 3", tVal, true},
-	{"print nil", `{{print nil}}`, "<nil>", tVal, true},
-	{"println", `{{println 1 2 3}}`, "1 2 3\n", tVal, true},
-	{"printf int", `{{printf "%04x" 127}}`, "007f", tVal, true},
-	{"printf float", `{{printf "%g" 3.5}}`, "3.5", tVal, true},
-	{"printf complex", `{{printf "%g" 1+7i}}`, "(1+7i)", tVal, true},
-	{"printf string", `{{printf "%s" "hello"}}`, "hello", tVal, true},
-	{"printf function", `{{printf "%#q" zeroArgs}}`, "`zeroArgs`", tVal, true},
-	{"printf field", `{{printf "%s" .U.V}}`, "v", tVal, true},
-	{"printf method", `{{printf "%s" .Method0}}`, "M0", tVal, true},
-	{"printf dot", `{{with .I}}{{printf "%d" .}}{{end}}`, "17", tVal, true},
-	{"printf var", `{{with $x := .I}}{{printf "%d" $x}}{{end}}`, "17", tVal, true},
-	{"printf lots", `{{printf "%d %s %g %s" 127 "hello" 7-3i .Method0}}`, "127 hello (7-3i) M0", tVal, true},
-
-	// HTML.
-	{"html", `{{html "<script>alert(\"XSS\");</script>"}}`,
-		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
-	{"html pipeline", `{{printf "<script>alert(\"XSS\");</script>" | html}}`,
-		"&lt;script&gt;alert(&#34;XSS&#34;);&lt;/script&gt;", nil, true},
-	{"html", `{{html .PS}}`, "a string", tVal, true},
-
-	// JavaScript.
-	{"js", `{{js .}}`, `It\'d be nice.`, `It'd be nice.`, true},
-
-	// URL query.
- {"urlquery", `{{"http://www.example.org/"|urlquery}}`, "http%3A%2F%2Fwww.example.org%2F", nil, true}, - - // Booleans - {"not", "{{not true}} {{not false}}", "false true", nil, true}, - {"and", "{{and false 0}} {{and 1 0}} {{and 0 true}} {{and 1 1}}", "false 0 0 1", nil, true}, - {"or", "{{or 0 0}} {{or 1 0}} {{or 0 true}} {{or 1 1}}", "0 1 true 1", nil, true}, - {"boolean if", "{{if and true 1 `hi`}}TRUE{{else}}FALSE{{end}}", "TRUE", tVal, true}, - {"boolean if not", "{{if and true 1 `hi` | not}}TRUE{{else}}FALSE{{end}}", "FALSE", nil, true}, - - // Indexing. - {"slice[0]", "{{index .SI 0}}", "3", tVal, true}, - {"slice[1]", "{{index .SI 1}}", "4", tVal, true}, - {"slice[HUGE]", "{{index .SI 10}}", "", tVal, false}, - {"slice[WRONG]", "{{index .SI `hello`}}", "", tVal, false}, - {"map[one]", "{{index .MSI `one`}}", "1", tVal, true}, - {"map[two]", "{{index .MSI `two`}}", "2", tVal, true}, - {"map[NO]", "{{index .MSI `XXX`}}", "0", tVal, true}, - {"map[nil]", "{{index .MSI nil}}", "0", tVal, true}, - {"map[WRONG]", "{{index .MSI 10}}", "", tVal, false}, - {"double index", "{{index .SMSI 1 `eleven`}}", "11", tVal, true}, - - // Len. - {"slice", "{{len .SI}}", "3", tVal, true}, - {"map", "{{len .MSI }}", "3", tVal, true}, - {"len of int", "{{len 3}}", "", tVal, false}, - {"len of nothing", "{{len .Empty0}}", "", tVal, false}, - - // With. - {"with true", "{{with true}}{{.}}{{end}}", "true", tVal, true}, - {"with false", "{{with false}}{{.}}{{else}}FALSE{{end}}", "FALSE", tVal, true}, - {"with 1", "{{with 1}}{{.}}{{else}}ZERO{{end}}", "1", tVal, true}, - {"with 0", "{{with 0}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true}, - {"with 1.5", "{{with 1.5}}{{.}}{{else}}ZERO{{end}}", "1.5", tVal, true}, - {"with 0.0", "{{with .FloatZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true}, - {"with 1.5i", "{{with 1.5i}}{{.}}{{else}}ZERO{{end}}", "(0+1.5i)", tVal, true}, - {"with 0.0i", "{{with .ComplexZero}}{{.}}{{else}}ZERO{{end}}", "ZERO", tVal, true}, - {"with emptystring", "{{with ``}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"with string", "{{with `notempty`}}{{.}}{{else}}EMPTY{{end}}", "notempty", tVal, true}, - {"with emptyslice", "{{with .SIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"with slice", "{{with .SI}}{{.}}{{else}}EMPTY{{end}}", "[3 4 5]", tVal, true}, - {"with emptymap", "{{with .MSIEmpty}}{{.}}{{else}}EMPTY{{end}}", "EMPTY", tVal, true}, - {"with map", "{{with .MSIone}}{{.}}{{else}}EMPTY{{end}}", "map[one:1]", tVal, true}, - {"with empty interface, struct field", "{{with .Empty4}}{{.V}}{{end}}", "UinEmpty", tVal, true}, - {"with $x int", "{{with $x := .I}}{{$x}}{{end}}", "17", tVal, true}, - {"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true}, - {"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true}, - - // Range. 
-	{"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true},
-	{"range empty no else", "{{range .SIEmpty}}-{{.}}-{{end}}", "", tVal, true},
-	{"range []int else", "{{range .SI}}-{{.}}-{{else}}EMPTY{{end}}", "-3--4--5-", tVal, true},
-	{"range empty else", "{{range .SIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"range []bool", "{{range .SB}}-{{.}}-{{end}}", "-true--false-", tVal, true},
-	{"range []int method", "{{range .SI | .MAdd .I}}-{{.}}-{{end}}", "-20--21--22-", tVal, true},
-	{"range map", "{{range .MSI}}-{{.}}-{{end}}", "-1--3--2-", tVal, true},
-	{"range empty map no else", "{{range .MSIEmpty}}-{{.}}-{{end}}", "", tVal, true},
-	{"range map else", "{{range .MSI}}-{{.}}-{{else}}EMPTY{{end}}", "-1--3--2-", tVal, true},
-	{"range empty map else", "{{range .MSIEmpty}}-{{.}}-{{else}}EMPTY{{end}}", "EMPTY", tVal, true},
-	{"range empty interface", "{{range .Empty3}}-{{.}}-{{else}}EMPTY{{end}}", "-7--8-", tVal, true},
-	{"range empty nil", "{{range .Empty0}}-{{.}}-{{end}}", "", tVal, true},
-	{"range $x SI", "{{range $x := .SI}}<{{$x}}>{{end}}", "<3><4><5>", tVal, true},
-	{"range $x $y SI", "{{range $x, $y := .SI}}<{{$x}}={{$y}}>{{end}}", "<0=3><1=4><2=5>", tVal, true},
-	{"range $x MSIone", "{{range $x := .MSIone}}<{{$x}}>{{end}}", "<1>", tVal, true},
-	{"range $x $y MSIone", "{{range $x, $y := .MSIone}}<{{$x}}={{$y}}>{{end}}", "<one=1>", tVal, true},
-	{"range $x PSI", "{{range $x := .PSI}}<{{$x}}>{{end}}", "<21><22><23>", tVal, true},
-	{"declare in range", "{{range $x := .PSI}}<{{$foo:=$x}}{{$x}}>{{end}}", "<21><22><23>", tVal, true},
-	{"range count", `{{range $i, $x := count 5}}[{{$i}}]{{$x}}{{end}}`, "[0]a[1]b[2]c[3]d[4]e", tVal, true},
-	{"range nil count", `{{range $i, $x := count 0}}{{else}}empty{{end}}`, "empty", tVal, true},
-
-	// Cute examples.
-	{"or as if true", `{{or .SI "slice is empty"}}`, "[3 4 5]", tVal, true},
-	{"or as if false", `{{or .SIEmpty "slice is empty"}}`, "slice is empty", tVal, true},
-
-	// Error handling.
-	{"error method, error", "{{.MyError true}}", "", tVal, false},
-	{"error method, no error", "{{.MyError false}}", "false", tVal, true},
-
-	// Fixed bugs.
-	// Must separate dot and receiver; otherwise args are evaluated with dot set to variable.
-	{"bug0", "{{range .MSIone}}{{if $.Method1 .}}X{{end}}{{end}}", "X", tVal, true},
-	// Do not loop endlessly in indirect for non-empty interfaces.
-	// The bug appears with *interface only; looped forever.
-	{"bug1", "{{.Method0}}", "M0", &iVal, true},
-	// Was taking address of interface field, so method set was empty.
-	{"bug2", "{{$.NonEmptyInterface.Method0}}", "M0", tVal, true},
-	// Struct values were not legal in with - mere oversight.
-	{"bug3", "{{with $}}{{.Method0}}{{end}}", "M0", tVal, true},
-	// Nil interface values in if.
-	{"bug4", "{{if .Empty0}}non-nil{{else}}nil{{end}}", "nil", tVal, true},
-	// Stringer.
-	{"bug5", "{{.Str}}", "foozle", tVal, true},
-	{"bug5a", "{{.Err}}", "erroozle", tVal, true},
-	// Args need to be indirected and dereferenced sometimes.
-	{"bug6a", "{{vfunc .V0 .V1}}", "vfunc", tVal, true},
-	{"bug6b", "{{vfunc .V0 .V0}}", "vfunc", tVal, true},
-	{"bug6c", "{{vfunc .V1 .V0}}", "vfunc", tVal, true},
-	{"bug6d", "{{vfunc .V1 .V1}}", "vfunc", tVal, true},
-	// Legal parse but illegal execution: non-function should have no arguments.
-	{"bug7a", "{{3 2}}", "", tVal, false},
-	{"bug7b", "{{$x := 1}}{{$x 2}}", "", tVal, false},
-	{"bug7c", "{{$x := 1}}{{3 | $x}}", "", tVal, false},
-	// Pipelined arg was not being type-checked.
- {"bug8a", "{{3|oneArg}}", "", tVal, false}, - {"bug8b", "{{4|dddArg 3}}", "", tVal, false}, - // A bug was introduced that broke map lookups for lower-case names. - {"bug9", "{{.cause}}", "neglect", map[string]string{"cause": "neglect"}, true}, - // Field chain starting with function did not work. - {"bug10", "{{mapOfThree.three}}-{{(mapOfThree).three}}", "3-3", 0, true}, - // Dereferencing nil pointer while evaluating function arguments should not panic. Issue 7333. - {"bug11", "{{valueString .PS}}", "", T{}, false}, - // 0xef gave constant type float64. Issue 8622. - {"bug12xe", "{{printf `%T` 0xef}}", "int", T{}, true}, - {"bug12xE", "{{printf `%T` 0xEE}}", "int", T{}, true}, - {"bug12Xe", "{{printf `%T` 0Xef}}", "int", T{}, true}, - {"bug12XE", "{{printf `%T` 0XEE}}", "int", T{}, true}, - // Chained nodes did not work as arguments. Issue 8473. - {"bug13", "{{print (.Copy).I}}", "17", tVal, true}, -} - -func zeroArgs() string { - return "zeroArgs" -} - -func oneArg(a string) string { - return "oneArg=" + a -} - -func dddArg(a int, b ...string) string { - return fmt.Sprintln(a, b) -} - -// count returns a channel that will deliver n sequential 1-letter strings starting at "a" -func count(n int) chan string { - if n == 0 { - return nil - } - c := make(chan string) - go func() { - for i := 0; i < n; i++ { - c <- "abcdefghijklmnop"[i : i+1] - } - close(c) - }() - return c -} - -// vfunc takes a *V and a V -func vfunc(V, *V) string { - return "vfunc" -} - -// valueString takes a string, not a pointer. -func valueString(v string) string { - return "value is ignored" -} - -func add(args ...int) int { - sum := 0 - for _, x := range args { - sum += x - } - return sum -} - -func echo(arg interface{}) interface{} { - return arg -} - -func makemap(arg ...string) map[string]string { - if len(arg)%2 != 0 { - panic("bad makemap") - } - m := make(map[string]string) - for i := 0; i < len(arg); i += 2 { - m[arg[i]] = arg[i+1] - } - return m -} - -func stringer(s fmt.Stringer) string { - return s.String() -} - -func mapOfThree() interface{} { - return map[string]int{"three": 3} -} - -func testExecute(execTests []execTest, template *Template, t *testing.T) { - b := new(bytes.Buffer) - funcs := FuncMap{ - "add": add, - "count": count, - "dddArg": dddArg, - "echo": echo, - "makemap": makemap, - "mapOfThree": mapOfThree, - "oneArg": oneArg, - "stringer": stringer, - "typeOf": typeOf, - "valueString": valueString, - "vfunc": vfunc, - "zeroArgs": zeroArgs, - } - for _, test := range execTests { - var tmpl *Template - var err error - if template == nil { - tmpl, err = New(test.name).Funcs(funcs).Parse(test.input) - } else { - tmpl, err = template.New(test.name).Funcs(funcs).Parse(test.input) - } - if err != nil { - t.Errorf("%s: parse error: %s", test.name, err) - continue - } - b.Reset() - err = tmpl.Execute(b, test.data) - switch { - case !test.ok && err == nil: - t.Errorf("%s: expected error; got none", test.name) - continue - case test.ok && err != nil: - t.Errorf("%s: unexpected execute error: %s", test.name, err) - continue - case !test.ok && err != nil: - // expected error, got one - if *debug { - fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err) - } - } - result := b.String() - if result != test.output { - t.Errorf("%s: expected\n\t%q\ngot\n\t%q", test.name, test.output, result) - } - } -} - -func TestExecute(t *testing.T) { - testExecute(execTests, nil, t) -} - -var delimPairs = []string{ - "", "", // default - "{{", "}}", // same as default - "<<", ">>", // distinct - "|", "|", // same - 
"(日)", "(本)", // peculiar -} - -func TestDelims(t *testing.T) { - const hello = "Hello, world" - var value = struct{ Str string }{hello} - for i := 0; i < len(delimPairs); i += 2 { - text := ".Str" - left := delimPairs[i+0] - trueLeft := left - right := delimPairs[i+1] - trueRight := right - if left == "" { // default case - trueLeft = "{{" - } - if right == "" { // default case - trueRight = "}}" - } - text = trueLeft + text + trueRight - // Now add a comment - text += trueLeft + "/*comment*/" + trueRight - // Now add an action containing a string. - text += trueLeft + `"` + trueLeft + `"` + trueRight - // At this point text looks like `{{.Str}}{{/*comment*/}}{{"{{"}}`. - tmpl, err := New("delims").Delims(left, right).Parse(text) - if err != nil { - t.Fatalf("delim %q text %q parse err %s", left, text, err) - } - var b = new(bytes.Buffer) - err = tmpl.Execute(b, value) - if err != nil { - t.Fatalf("delim %q exec err %s", left, err) - } - if b.String() != hello+trueLeft { - t.Errorf("expected %q got %q", hello+trueLeft, b.String()) - } - } -} - -// Check that an error from a method flows back to the top. -func TestExecuteError(t *testing.T) { - b := new(bytes.Buffer) - tmpl := New("error") - _, err := tmpl.Parse("{{.MyError true}}") - if err != nil { - t.Fatalf("parse error: %s", err) - } - err = tmpl.Execute(b, tVal) - if err == nil { - t.Errorf("expected error; got none") - } else if !strings.Contains(err.Error(), myError.Error()) { - if *debug { - fmt.Printf("test execute error: %s\n", err) - } - t.Errorf("expected myError; got %s", err) - } -} - -const execErrorText = `line 1 -line 2 -line 3 -{{template "one" .}} -{{define "one"}}{{template "two" .}}{{end}} -{{define "two"}}{{template "three" .}}{{end}} -{{define "three"}}{{index "hi" $}}{{end}}` - -// Check that an error from a nested template contains all the relevant information. -func TestExecError(t *testing.T) { - tmpl, err := New("top").Parse(execErrorText) - if err != nil { - t.Fatal("parse error:", err) - } - var b bytes.Buffer - err = tmpl.Execute(&b, 5) // 5 is out of range indexing "hi" - if err == nil { - t.Fatal("expected error") - } - const want = `template: top:7:20: executing "three" at : error calling index: index out of range: 5` - got := err.Error() - if got != want { - t.Errorf("expected\n%q\ngot\n%q", want, got) - } -} - -func TestJSEscaping(t *testing.T) { - testCases := []struct { - in, exp string - }{ - {`a`, `a`}, - {`'foo`, `\'foo`}, - {`Go "jump" \`, `Go \"jump\" \\`}, - {`Yukihiro says "今日は世界"`, `Yukihiro says \"今日は世界\"`}, - {"unprintable \uFDFF", `unprintable \uFDFF`}, - {``, `\x3Chtml\x3E`}, - } - for _, tc := range testCases { - s := JSEscapeString(tc.in) - if s != tc.exp { - t.Errorf("JS escaping [%s] got [%s] want [%s]", tc.in, s, tc.exp) - } - } -} - -// A nice example: walk a binary tree. - -type Tree struct { - Val int - Left, Right *Tree -} - -// Use different delimiters to test Set.Delims. -const treeTemplate = ` - (define "tree") - [ - (.Val) - (with .Left) - (template "tree" .) - (end) - (with .Right) - (template "tree" .) 
- (end) - ] - (end) -` - -func TestTree(t *testing.T) { - var tree = &Tree{ - 1, - &Tree{ - 2, &Tree{ - 3, - &Tree{ - 4, nil, nil, - }, - nil, - }, - &Tree{ - 5, - &Tree{ - 6, nil, nil, - }, - nil, - }, - }, - &Tree{ - 7, - &Tree{ - 8, - &Tree{ - 9, nil, nil, - }, - nil, - }, - &Tree{ - 10, - &Tree{ - 11, nil, nil, - }, - nil, - }, - }, - } - tmpl, err := New("root").Delims("(", ")").Parse(treeTemplate) - if err != nil { - t.Fatal("parse error:", err) - } - var b bytes.Buffer - stripSpace := func(r rune) rune { - if r == '\t' || r == '\n' { - return -1 - } - return r - } - const expect = "[1[2[3[4]][5[6]]][7[8[9]][10[11]]]]" - // First by looking up the template. - err = tmpl.Lookup("tree").Execute(&b, tree) - if err != nil { - t.Fatal("exec error:", err) - } - result := strings.Map(stripSpace, b.String()) - if result != expect { - t.Errorf("expected %q got %q", expect, result) - } - // Then direct to execution. - b.Reset() - err = tmpl.ExecuteTemplate(&b, "tree", tree) - if err != nil { - t.Fatal("exec error:", err) - } - result = strings.Map(stripSpace, b.String()) - if result != expect { - t.Errorf("expected %q got %q", expect, result) - } -} - -func TestExecuteOnNewTemplate(t *testing.T) { - // This is issue 3872. - _ = New("Name").Templates() -} - -const testTemplates = `{{define "one"}}one{{end}}{{define "two"}}two{{end}}` - -func TestMessageForExecuteEmpty(t *testing.T) { - // Test a truly empty template. - tmpl := New("empty") - var b bytes.Buffer - err := tmpl.Execute(&b, 0) - if err == nil { - t.Fatal("expected initial error") - } - got := err.Error() - want := `template: empty: "empty" is an incomplete or empty template` - if got != want { - t.Errorf("expected error %s got %s", want, got) - } - // Add a non-empty template to check that the error is helpful. - tests, err := New("").Parse(testTemplates) - if err != nil { - t.Fatal(err) - } - tmpl.AddParseTree("secondary", tests.Tree) - err = tmpl.Execute(&b, 0) - if err == nil { - t.Fatal("expected second error") - } - got = err.Error() - want = `template: empty: "empty" is an incomplete or empty template; defined templates are: "secondary"` - if got != want { - t.Errorf("expected error %s got %s", want, got) - } - // Make sure we can execute the secondary. 
- err = tmpl.ExecuteTemplate(&b, "secondary", 0) - if err != nil { - t.Fatal(err) - } -} - -func TestFinalForPrintf(t *testing.T) { - tmpl, err := New("").Parse(`{{"x" | printf}}`) - if err != nil { - t.Fatal(err) - } - var b bytes.Buffer - err = tmpl.Execute(&b, 0) - if err != nil { - t.Fatal(err) - } -} - -type cmpTest struct { - expr string - truth string - ok bool -} - -var cmpTests = []cmpTest{ - {"eq true true", "true", true}, - {"eq true false", "false", true}, - {"eq 1+2i 1+2i", "true", true}, - {"eq 1+2i 1+3i", "false", true}, - {"eq 1.5 1.5", "true", true}, - {"eq 1.5 2.5", "false", true}, - {"eq 1 1", "true", true}, - {"eq 1 2", "false", true}, - {"eq `xy` `xy`", "true", true}, - {"eq `xy` `xyz`", "false", true}, - {"eq .Uthree .Uthree", "true", true}, - {"eq .Uthree .Ufour", "false", true}, - {"eq 3 4 5 6 3", "true", true}, - {"eq 3 4 5 6 7", "false", true}, - {"ne true true", "false", true}, - {"ne true false", "true", true}, - {"ne 1+2i 1+2i", "false", true}, - {"ne 1+2i 1+3i", "true", true}, - {"ne 1.5 1.5", "false", true}, - {"ne 1.5 2.5", "true", true}, - {"ne 1 1", "false", true}, - {"ne 1 2", "true", true}, - {"ne `xy` `xy`", "false", true}, - {"ne `xy` `xyz`", "true", true}, - {"ne .Uthree .Uthree", "false", true}, - {"ne .Uthree .Ufour", "true", true}, - {"lt 1.5 1.5", "false", true}, - {"lt 1.5 2.5", "true", true}, - {"lt 1 1", "false", true}, - {"lt 1 2", "true", true}, - {"lt `xy` `xy`", "false", true}, - {"lt `xy` `xyz`", "true", true}, - {"lt .Uthree .Uthree", "false", true}, - {"lt .Uthree .Ufour", "true", true}, - {"le 1.5 1.5", "true", true}, - {"le 1.5 2.5", "true", true}, - {"le 2.5 1.5", "false", true}, - {"le 1 1", "true", true}, - {"le 1 2", "true", true}, - {"le 2 1", "false", true}, - {"le `xy` `xy`", "true", true}, - {"le `xy` `xyz`", "true", true}, - {"le `xyz` `xy`", "false", true}, - {"le .Uthree .Uthree", "true", true}, - {"le .Uthree .Ufour", "true", true}, - {"le .Ufour .Uthree", "false", true}, - {"gt 1.5 1.5", "false", true}, - {"gt 1.5 2.5", "false", true}, - {"gt 1 1", "false", true}, - {"gt 2 1", "true", true}, - {"gt 1 2", "false", true}, - {"gt `xy` `xy`", "false", true}, - {"gt `xy` `xyz`", "false", true}, - {"gt .Uthree .Uthree", "false", true}, - {"gt .Uthree .Ufour", "false", true}, - {"gt .Ufour .Uthree", "true", true}, - {"ge 1.5 1.5", "true", true}, - {"ge 1.5 2.5", "false", true}, - {"ge 2.5 1.5", "true", true}, - {"ge 1 1", "true", true}, - {"ge 1 2", "false", true}, - {"ge 2 1", "true", true}, - {"ge `xy` `xy`", "true", true}, - {"ge `xy` `xyz`", "false", true}, - {"ge `xyz` `xy`", "true", true}, - {"ge .Uthree .Uthree", "true", true}, - {"ge .Uthree .Ufour", "false", true}, - {"ge .Ufour .Uthree", "true", true}, - // Mixing signed and unsigned integers. 
- {"eq .Uthree .Three", "true", true}, - {"eq .Three .Uthree", "true", true}, - {"le .Uthree .Three", "true", true}, - {"le .Three .Uthree", "true", true}, - {"ge .Uthree .Three", "true", true}, - {"ge .Three .Uthree", "true", true}, - {"lt .Uthree .Three", "false", true}, - {"lt .Three .Uthree", "false", true}, - {"gt .Uthree .Three", "false", true}, - {"gt .Three .Uthree", "false", true}, - {"eq .Ufour .Three", "false", true}, - {"lt .Ufour .Three", "false", true}, - {"gt .Ufour .Three", "true", true}, - {"eq .NegOne .Uthree", "false", true}, - {"eq .Uthree .NegOne", "false", true}, - {"ne .NegOne .Uthree", "true", true}, - {"ne .Uthree .NegOne", "true", true}, - {"lt .NegOne .Uthree", "true", true}, - {"lt .Uthree .NegOne", "false", true}, - {"le .NegOne .Uthree", "true", true}, - {"le .Uthree .NegOne", "false", true}, - {"gt .NegOne .Uthree", "false", true}, - {"gt .Uthree .NegOne", "true", true}, - {"ge .NegOne .Uthree", "false", true}, - {"ge .Uthree .NegOne", "true", true}, - {"eq (index `x` 0) 'x'", "true", true}, // The example that triggered this rule. - {"eq (index `x` 0) 'y'", "false", true}, - // Errors - {"eq `xy` 1", "", false}, // Different types. - {"eq 2 2.0", "", false}, // Different types. - {"lt true true", "", false}, // Unordered types. - {"lt 1+0i 1+0i", "", false}, // Unordered types. -} - -func TestComparison(t *testing.T) { - b := new(bytes.Buffer) - var cmpStruct = struct { - Uthree, Ufour uint - NegOne, Three int - }{3, 4, -1, 3} - for _, test := range cmpTests { - text := fmt.Sprintf("{{if %s}}true{{else}}false{{end}}", test.expr) - tmpl, err := New("empty").Parse(text) - if err != nil { - t.Fatalf("%q: %s", test.expr, err) - } - b.Reset() - err = tmpl.Execute(b, &cmpStruct) - if test.ok && err != nil { - t.Errorf("%s errored incorrectly: %s", test.expr, err) - continue - } - if !test.ok && err == nil { - t.Errorf("%s did not error", test.expr) - continue - } - if b.String() != test.truth { - t.Errorf("%s: want %s; got %s", test.expr, test.truth, b.String()) - } - } -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/funcs.go b/Godeps/_workspace/src/github.com/alecthomas/template/funcs.go deleted file mode 100644 index 39ee5ed68..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/funcs.go +++ /dev/null @@ -1,598 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "bytes" - "errors" - "fmt" - "io" - "net/url" - "reflect" - "strings" - "unicode" - "unicode/utf8" -) - -// FuncMap is the type of the map defining the mapping from names to functions. -// Each function must have either a single return value, or two return values of -// which the second has type error. In that case, if the second (error) -// return value evaluates to non-nil during execution, execution terminates and -// Execute returns that error. 
-type FuncMap map[string]interface{} - -var builtins = FuncMap{ - "and": and, - "call": call, - "html": HTMLEscaper, - "index": index, - "js": JSEscaper, - "len": length, - "not": not, - "or": or, - "print": fmt.Sprint, - "printf": fmt.Sprintf, - "println": fmt.Sprintln, - "urlquery": URLQueryEscaper, - - // Comparisons - "eq": eq, // == - "ge": ge, // >= - "gt": gt, // > - "le": le, // <= - "lt": lt, // < - "ne": ne, // != -} - -var builtinFuncs = createValueFuncs(builtins) - -// createValueFuncs turns a FuncMap into a map[string]reflect.Value -func createValueFuncs(funcMap FuncMap) map[string]reflect.Value { - m := make(map[string]reflect.Value) - addValueFuncs(m, funcMap) - return m -} - -// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values. -func addValueFuncs(out map[string]reflect.Value, in FuncMap) { - for name, fn := range in { - v := reflect.ValueOf(fn) - if v.Kind() != reflect.Func { - panic("value for " + name + " not a function") - } - if !goodFunc(v.Type()) { - panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut())) - } - out[name] = v - } -} - -// addFuncs adds to values the functions in funcs. It does no checking of the input - -// call addValueFuncs first. -func addFuncs(out, in FuncMap) { - for name, fn := range in { - out[name] = fn - } -} - -// goodFunc checks that the function or method has the right result signature. -func goodFunc(typ reflect.Type) bool { - // We allow functions with 1 result or 2 results where the second is an error. - switch { - case typ.NumOut() == 1: - return true - case typ.NumOut() == 2 && typ.Out(1) == errorType: - return true - } - return false -} - -// findFunction looks for a function in the template, and global map. -func findFunction(name string, tmpl *Template) (reflect.Value, bool) { - if tmpl != nil && tmpl.common != nil { - if fn := tmpl.execFuncs[name]; fn.IsValid() { - return fn, true - } - } - if fn := builtinFuncs[name]; fn.IsValid() { - return fn, true - } - return reflect.Value{}, false -} - -// Indexing. - -// index returns the result of indexing its first argument by the following -// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each -// indexed item must be a map, slice, or array. 
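
As a quick editorial illustration of that contract (not part of the vendored file; written against the standard text/template package, which this fork tracks, with an invented data shape), indexing chains through maps and slices exactly as the doc comment describes:

package main

import (
	"os"
	"text/template"
)

func main() {
	data := map[string][]int{"primes": {2, 3, 5, 7}}
	// {{index . "primes" 2}} is data["primes"][2] in Go syntax.
	t := template.Must(template.New("t").Parse(`{{index . "primes" 2}}` + "\n"))
	_ = t.Execute(os.Stdout, data) // prints "5"
}
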
-func index(item interface{}, indices ...interface{}) (interface{}, error) { - v := reflect.ValueOf(item) - for _, i := range indices { - index := reflect.ValueOf(i) - var isNil bool - if v, isNil = indirect(v); isNil { - return nil, fmt.Errorf("index of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.String: - var x int64 - switch index.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x = index.Int() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - x = int64(index.Uint()) - default: - return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type()) - } - if x < 0 || x >= int64(v.Len()) { - return nil, fmt.Errorf("index out of range: %d", x) - } - v = v.Index(int(x)) - case reflect.Map: - if !index.IsValid() { - index = reflect.Zero(v.Type().Key()) - } - if !index.Type().AssignableTo(v.Type().Key()) { - return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type()) - } - if x := v.MapIndex(index); x.IsValid() { - v = x - } else { - v = reflect.Zero(v.Type().Elem()) - } - default: - return nil, fmt.Errorf("can't index item of type %s", v.Type()) - } - } - return v.Interface(), nil -} - -// Length - -// length returns the length of the item, with an error if it has no defined length. -func length(item interface{}) (int, error) { - v, isNil := indirect(reflect.ValueOf(item)) - if isNil { - return 0, fmt.Errorf("len of nil pointer") - } - switch v.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String: - return v.Len(), nil - } - return 0, fmt.Errorf("len of type %s", v.Type()) -} - -// Function invocation - -// call returns the result of evaluating the first argument as a function. -// The function must return 1 result, or 2 results, the second of which is an error. -func call(fn interface{}, args ...interface{}) (interface{}, error) { - v := reflect.ValueOf(fn) - typ := v.Type() - if typ.Kind() != reflect.Func { - return nil, fmt.Errorf("non-function of type %s", typ) - } - if !goodFunc(typ) { - return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut()) - } - numIn := typ.NumIn() - var dddType reflect.Type - if typ.IsVariadic() { - if len(args) < numIn-1 { - return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1) - } - dddType = typ.In(numIn - 1).Elem() - } else { - if len(args) != numIn { - return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn) - } - } - argv := make([]reflect.Value, len(args)) - for i, arg := range args { - value := reflect.ValueOf(arg) - // Compute the expected type. Clumsy because of variadics. - var argType reflect.Type - if !typ.IsVariadic() || i < numIn-1 { - argType = typ.In(i) - } else { - argType = dddType - } - if !value.IsValid() && canBeNil(argType) { - value = reflect.Zero(argType) - } - if !value.Type().AssignableTo(argType) { - return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType) - } - argv[i] = value - } - result := v.Call(argv) - if len(result) == 2 && !result[1].IsNil() { - return result[0].Interface(), result[1].Interface().(error) - } - return result[0].Interface(), nil -} - -// Boolean logic. - -func truth(a interface{}) bool { - t, _ := isTrue(reflect.ValueOf(a)) - return t -} - -// and computes the Boolean AND of its arguments, returning -// the first false argument it encounters, or the last argument. 
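
Because and/or return one of their operands rather than a bare boolean, "or" doubles as a default-value selector. A short editorial example (not part of the vendored file; standard text/template, invented data):

package main

import (
	"os"
	"text/template"
)

func main() {
	// An empty string is falsy, so "or" falls through to .Name.
	t := template.Must(template.New("t").Parse(`{{or .Nickname .Name}}` + "\n"))
	_ = t.Execute(os.Stdout, map[string]string{"Name": "Ada", "Nickname": ""})  // prints "Ada"
	_ = t.Execute(os.Stdout, map[string]string{"Name": "Ada", "Nickname": "A"}) // prints "A"
}
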
-func and(arg0 interface{}, args ...interface{}) interface{} { - if !truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if !truth(arg0) { - break - } - } - return arg0 -} - -// or computes the Boolean OR of its arguments, returning -// the first true argument it encounters, or the last argument. -func or(arg0 interface{}, args ...interface{}) interface{} { - if truth(arg0) { - return arg0 - } - for i := range args { - arg0 = args[i] - if truth(arg0) { - break - } - } - return arg0 -} - -// not returns the Boolean negation of its argument. -func not(arg interface{}) (truth bool) { - truth, _ = isTrue(reflect.ValueOf(arg)) - return !truth -} - -// Comparison. - -// TODO: Perhaps allow comparison between signed and unsigned integers. - -var ( - errBadComparisonType = errors.New("invalid type for comparison") - errBadComparison = errors.New("incompatible types for comparison") - errNoComparison = errors.New("missing argument for comparison") -) - -type kind int - -const ( - invalidKind kind = iota - boolKind - complexKind - intKind - floatKind - integerKind - stringKind - uintKind -) - -func basicKind(v reflect.Value) (kind, error) { - switch v.Kind() { - case reflect.Bool: - return boolKind, nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return intKind, nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return uintKind, nil - case reflect.Float32, reflect.Float64: - return floatKind, nil - case reflect.Complex64, reflect.Complex128: - return complexKind, nil - case reflect.String: - return stringKind, nil - } - return invalidKind, errBadComparisonType -} - -// eq evaluates the comparison a == b || a == c || ... -func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - if len(arg2) == 0 { - return false, errNoComparison - } - for _, arg := range arg2 { - v2 := reflect.ValueOf(arg) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. - switch { - case k1 == intKind && k2 == uintKind: - truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint() - case k1 == uintKind && k2 == intKind: - truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int()) - default: - return false, errBadComparison - } - } else { - switch k1 { - case boolKind: - truth = v1.Bool() == v2.Bool() - case complexKind: - truth = v1.Complex() == v2.Complex() - case floatKind: - truth = v1.Float() == v2.Float() - case intKind: - truth = v1.Int() == v2.Int() - case stringKind: - truth = v1.String() == v2.String() - case uintKind: - truth = v1.Uint() == v2.Uint() - default: - panic("invalid kind") - } - } - if truth { - return true, nil - } - } - return false, nil -} - -// ne evaluates the comparison a != b. -func ne(arg1, arg2 interface{}) (bool, error) { - // != is the inverse of ==. - equal, err := eq(arg1, arg2) - return !equal, err -} - -// lt evaluates the comparison a < b. -func lt(arg1, arg2 interface{}) (bool, error) { - v1 := reflect.ValueOf(arg1) - k1, err := basicKind(v1) - if err != nil { - return false, err - } - v2 := reflect.ValueOf(arg2) - k2, err := basicKind(v2) - if err != nil { - return false, err - } - truth := false - if k1 != k2 { - // Special case: Can compare integer values regardless of type's sign. 
-		switch {
-		case k1 == intKind && k2 == uintKind:
-			truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
-		case k1 == uintKind && k2 == intKind:
-			truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
-		default:
-			return false, errBadComparison
-		}
-	} else {
-		switch k1 {
-		case boolKind, complexKind:
-			return false, errBadComparisonType
-		case floatKind:
-			truth = v1.Float() < v2.Float()
-		case intKind:
-			truth = v1.Int() < v2.Int()
-		case stringKind:
-			truth = v1.String() < v2.String()
-		case uintKind:
-			truth = v1.Uint() < v2.Uint()
-		default:
-			panic("invalid kind")
-		}
-	}
-	return truth, nil
-}
-
-// le evaluates the comparison <= b.
-func le(arg1, arg2 interface{}) (bool, error) {
-	// <= is < or ==.
-	lessThan, err := lt(arg1, arg2)
-	if lessThan || err != nil {
-		return lessThan, err
-	}
-	return eq(arg1, arg2)
-}
-
-// gt evaluates the comparison a > b.
-func gt(arg1, arg2 interface{}) (bool, error) {
-	// > is the inverse of <=.
-	lessOrEqual, err := le(arg1, arg2)
-	if err != nil {
-		return false, err
-	}
-	return !lessOrEqual, nil
-}
-
-// ge evaluates the comparison a >= b.
-func ge(arg1, arg2 interface{}) (bool, error) {
-	// >= is the inverse of <.
-	lessThan, err := lt(arg1, arg2)
-	if err != nil {
-		return false, err
-	}
-	return !lessThan, nil
-}
-
-// HTML escaping.
-
-var (
-	htmlQuot = []byte("&#34;") // shorter than "&quot;"
-	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
-	htmlAmp  = []byte("&amp;")
-	htmlLt   = []byte("&lt;")
-	htmlGt   = []byte("&gt;")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
-	last := 0
-	for i, c := range b {
-		var html []byte
-		switch c {
-		case '"':
-			html = htmlQuot
-		case '\'':
-			html = htmlApos
-		case '&':
-			html = htmlAmp
-		case '<':
-			html = htmlLt
-		case '>':
-			html = htmlGt
-		default:
-			continue
-		}
-		w.Write(b[last:i])
-		w.Write(html)
-		last = i + 1
-	}
-	w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
-	// Avoid allocation if we can.
-	if strings.IndexAny(s, `'"&<>`) < 0 {
-		return s
-	}
-	var b bytes.Buffer
-	HTMLEscape(&b, []byte(s))
-	return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
-	return HTMLEscapeString(evalArgs(args))
-}
-
-// JavaScript escaping.
-
-var (
-	jsLowUni = []byte(`\u00`)
-	hex      = []byte("0123456789ABCDEF")
-
-	jsBackslash = []byte(`\\`)
-	jsApos      = []byte(`\'`)
-	jsQuot      = []byte(`\"`)
-	jsLt        = []byte(`\x3C`)
-	jsGt        = []byte(`\x3E`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
-	last := 0
-	for i := 0; i < len(b); i++ {
-		c := b[i]
-
-		if !jsIsSpecial(rune(c)) {
-			// fast path: nothing to do
-			continue
-		}
-		w.Write(b[last:i])
-
-		if c < utf8.RuneSelf {
-			// Quotes, slashes and angle brackets get quoted.
-			// Control characters get written as \u00XX.
-			switch c {
-			case '\\':
-				w.Write(jsBackslash)
-			case '\'':
-				w.Write(jsApos)
-			case '"':
-				w.Write(jsQuot)
-			case '<':
-				w.Write(jsLt)
-			case '>':
-				w.Write(jsGt)
-			default:
-				w.Write(jsLowUni)
-				t, b := c>>4, c&0x0f
-				w.Write(hex[t : t+1])
-				w.Write(hex[b : b+1])
-			}
-		} else {
-			// Unicode rune.
- r, size := utf8.DecodeRune(b[i:]) - if unicode.IsPrint(r) { - w.Write(b[i : i+size]) - } else { - fmt.Fprintf(w, "\\u%04X", r) - } - i += size - 1 - } - last = i + 1 - } - w.Write(b[last:]) -} - -// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s. -func JSEscapeString(s string) string { - // Avoid allocation if we can. - if strings.IndexFunc(s, jsIsSpecial) < 0 { - return s - } - var b bytes.Buffer - JSEscape(&b, []byte(s)) - return b.String() -} - -func jsIsSpecial(r rune) bool { - switch r { - case '\\', '\'', '"', '<', '>': - return true - } - return r < ' ' || utf8.RuneSelf <= r -} - -// JSEscaper returns the escaped JavaScript equivalent of the textual -// representation of its arguments. -func JSEscaper(args ...interface{}) string { - return JSEscapeString(evalArgs(args)) -} - -// URLQueryEscaper returns the escaped value of the textual representation of -// its arguments in a form suitable for embedding in a URL query. -func URLQueryEscaper(args ...interface{}) string { - return url.QueryEscape(evalArgs(args)) -} - -// evalArgs formats the list of arguments into a string. It is therefore equivalent to -// fmt.Sprint(args...) -// except that each argument is indirected (if a pointer), as required, -// using the same rules as the default string evaluation during template -// execution. -func evalArgs(args []interface{}) string { - ok := false - var s string - // Fast path for simple common case. - if len(args) == 1 { - s, ok = args[0].(string) - } - if !ok { - for i, arg := range args { - a, ok := printableValue(reflect.ValueOf(arg)) - if ok { - args[i] = a - } // else left fmt do its thing - } - s = fmt.Sprint(args...) - } - return s -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/helper.go b/Godeps/_workspace/src/github.com/alecthomas/template/helper.go deleted file mode 100644 index 3636fb54d..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/helper.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Helper functions to make constructing templates easier. - -package template - -import ( - "fmt" - "io/ioutil" - "path/filepath" -) - -// Functions and methods to parse templates. - -// Must is a helper that wraps a call to a function returning (*Template, error) -// and panics if the error is non-nil. It is intended for use in variable -// initializations such as -// var t = template.Must(template.New("name").Parse("text")) -func Must(t *Template, err error) *Template { - if err != nil { - panic(err) - } - return t -} - -// ParseFiles creates a new Template and parses the template definitions from -// the named files. The returned template's name will have the (base) name and -// (parsed) contents of the first file. There must be at least one file. -// If an error occurs, parsing stops and the returned *Template is nil. -func ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(nil, filenames...) -} - -// ParseFiles parses the named files and associates the resulting templates with -// t. If an error occurs, parsing stops and the returned template is nil; -// otherwise it is t. There must be at least one file. -func (t *Template) ParseFiles(filenames ...string) (*Template, error) { - return parseFiles(t, filenames...) -} - -// parseFiles is the helper for the method and function. 
If the argument -// template is nil, it is created from the first file. -func parseFiles(t *Template, filenames ...string) (*Template, error) { - if len(filenames) == 0 { - // Not really a problem, but be consistent. - return nil, fmt.Errorf("template: no files named in call to ParseFiles") - } - for _, filename := range filenames { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - s := string(b) - name := filepath.Base(filename) - // First template becomes return value if not already defined, - // and we use that one for subsequent New calls to associate - // all the templates together. Also, if this file has the same name - // as t, this file becomes the contents of t, so - // t, err := New(name).Funcs(xxx).ParseFiles(name) - // works. Otherwise we create a new template associated with t. - var tmpl *Template - if t == nil { - t = New(name) - } - if name == t.Name() { - tmpl = t - } else { - tmpl = t.New(name) - } - _, err = tmpl.Parse(s) - if err != nil { - return nil, err - } - } - return t, nil -} - -// ParseGlob creates a new Template and parses the template definitions from the -// files identified by the pattern, which must match at least one file. The -// returned template will have the (base) name and (parsed) contents of the -// first file matched by the pattern. ParseGlob is equivalent to calling -// ParseFiles with the list of files matched by the pattern. -func ParseGlob(pattern string) (*Template, error) { - return parseGlob(nil, pattern) -} - -// ParseGlob parses the template definitions in the files identified by the -// pattern and associates the resulting templates with t. The pattern is -// processed by filepath.Glob and must match at least one file. ParseGlob is -// equivalent to calling t.ParseFiles with the list of files matched by the -// pattern. -func (t *Template) ParseGlob(pattern string) (*Template, error) { - return parseGlob(t, pattern) -} - -// parseGlob is the implementation of the function and method ParseGlob. -func parseGlob(t *Template, pattern string) (*Template, error) { - filenames, err := filepath.Glob(pattern) - if err != nil { - return nil, err - } - if len(filenames) == 0 { - return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern) - } - return parseFiles(t, filenames...) -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/multi_test.go b/Godeps/_workspace/src/github.com/alecthomas/template/multi_test.go deleted file mode 100644 index 0cfd2b5f6..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/multi_test.go +++ /dev/null @@ -1,292 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -// Tests for mulitple-template parsing and execution. 
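
Before the test tables, a small editorial example (not part of the vendored file; written against the standard text/template package this fork tracks) of the define/invoke behavior these tests exercise: a single source can both define an associated template and invoke it, and the text outside {{define}} becomes the body of the root template.

package main

import (
	"os"
	"text/template"
)

func main() {
	const src = `{{define "item"}}- {{.}}
{{end}}{{range .}}{{template "item" .}}{{end}}`
	t := template.Must(template.New("list").Parse(src))
	_ = t.Execute(os.Stdout, []string{"parse", "execute"})
	// prints:
	// - parse
	// - execute
}
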
- -import ( - "bytes" - "fmt" - "github.com/alecthomas/template/parse" - "strings" - "testing" -) - -const ( - noError = true - hasError = false -) - -type multiParseTest struct { - name string - input string - ok bool - names []string - results []string -} - -var multiParseTests = []multiParseTest{ - {"empty", "", noError, - nil, - nil}, - {"one", `{{define "foo"}} FOO {{end}}`, noError, - []string{"foo"}, - []string{" FOO "}}, - {"two", `{{define "foo"}} FOO {{end}}{{define "bar"}} BAR {{end}}`, noError, - []string{"foo", "bar"}, - []string{" FOO ", " BAR "}}, - // errors - {"missing end", `{{define "foo"}} FOO `, hasError, - nil, - nil}, - {"malformed name", `{{define "foo}} FOO `, hasError, - nil, - nil}, -} - -func TestMultiParse(t *testing.T) { - for _, test := range multiParseTests { - template, err := New("root").Parse(test.input) - switch { - case err == nil && !test.ok: - t.Errorf("%q: expected error; got none", test.name) - continue - case err != nil && test.ok: - t.Errorf("%q: unexpected error: %v", test.name, err) - continue - case err != nil && !test.ok: - // expected error, got one - if *debug { - fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err) - } - continue - } - if template == nil { - continue - } - if len(template.tmpl) != len(test.names)+1 { // +1 for root - t.Errorf("%s: wrong number of templates; wanted %d got %d", test.name, len(test.names), len(template.tmpl)) - continue - } - for i, name := range test.names { - tmpl, ok := template.tmpl[name] - if !ok { - t.Errorf("%s: can't find template %q", test.name, name) - continue - } - result := tmpl.Root.String() - if result != test.results[i] { - t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.results[i]) - } - } - } -} - -var multiExecTests = []execTest{ - {"empty", "", "", nil, true}, - {"text", "some text", "some text", nil, true}, - {"invoke x", `{{template "x" .SI}}`, "TEXT", tVal, true}, - {"invoke x no args", `{{template "x"}}`, "TEXT", tVal, true}, - {"invoke dot int", `{{template "dot" .I}}`, "17", tVal, true}, - {"invoke dot []int", `{{template "dot" .SI}}`, "[3 4 5]", tVal, true}, - {"invoke dotV", `{{template "dotV" .U}}`, "v", tVal, true}, - {"invoke nested int", `{{template "nested" .I}}`, "17", tVal, true}, - {"variable declared by template", `{{template "nested" $x:=.SI}},{{index $x 1}}`, "[3 4 5],4", tVal, true}, - - // User-defined function: test argument evaluator. - {"testFunc literal", `{{oneArg "joe"}}`, "oneArg=joe", tVal, true}, - {"testFunc .", `{{oneArg .}}`, "oneArg=joe", "joe", true}, -} - -// These strings are also in testdata/*. -const multiText1 = ` - {{define "x"}}TEXT{{end}} - {{define "dotV"}}{{.V}}{{end}} -` - -const multiText2 = ` - {{define "dot"}}{{.}}{{end}} - {{define "nested"}}{{template "dot" .}}{{end}} -` - -func TestMultiExecute(t *testing.T) { - // Declare a couple of templates first. 
- template, err := New("root").Parse(multiText1) - if err != nil { - t.Fatalf("parse error for 1: %s", err) - } - _, err = template.Parse(multiText2) - if err != nil { - t.Fatalf("parse error for 2: %s", err) - } - testExecute(multiExecTests, template, t) -} - -func TestParseFiles(t *testing.T) { - _, err := ParseFiles("DOES NOT EXIST") - if err == nil { - t.Error("expected error for non-existent file; got none") - } - template := New("root") - _, err = template.ParseFiles("testdata/file1.tmpl", "testdata/file2.tmpl") - if err != nil { - t.Fatalf("error parsing files: %v", err) - } - testExecute(multiExecTests, template, t) -} - -func TestParseGlob(t *testing.T) { - _, err := ParseGlob("DOES NOT EXIST") - if err == nil { - t.Error("expected error for non-existent file; got none") - } - _, err = New("error").ParseGlob("[x") - if err == nil { - t.Error("expected error for bad pattern; got none") - } - template := New("root") - _, err = template.ParseGlob("testdata/file*.tmpl") - if err != nil { - t.Fatalf("error parsing files: %v", err) - } - testExecute(multiExecTests, template, t) -} - -// In these tests, actual content (not just template definitions) comes from the parsed files. - -var templateFileExecTests = []execTest{ - {"test", `{{template "tmpl1.tmpl"}}{{template "tmpl2.tmpl"}}`, "template1\n\ny\ntemplate2\n\nx\n", 0, true}, -} - -func TestParseFilesWithData(t *testing.T) { - template, err := New("root").ParseFiles("testdata/tmpl1.tmpl", "testdata/tmpl2.tmpl") - if err != nil { - t.Fatalf("error parsing files: %v", err) - } - testExecute(templateFileExecTests, template, t) -} - -func TestParseGlobWithData(t *testing.T) { - template, err := New("root").ParseGlob("testdata/tmpl*.tmpl") - if err != nil { - t.Fatalf("error parsing files: %v", err) - } - testExecute(templateFileExecTests, template, t) -} - -const ( - cloneText1 = `{{define "a"}}{{template "b"}}{{template "c"}}{{end}}` - cloneText2 = `{{define "b"}}b{{end}}` - cloneText3 = `{{define "c"}}root{{end}}` - cloneText4 = `{{define "c"}}clone{{end}}` -) - -func TestClone(t *testing.T) { - // Create some templates and clone the root. - root, err := New("root").Parse(cloneText1) - if err != nil { - t.Fatal(err) - } - _, err = root.Parse(cloneText2) - if err != nil { - t.Fatal(err) - } - clone := Must(root.Clone()) - // Add variants to both. - _, err = root.Parse(cloneText3) - if err != nil { - t.Fatal(err) - } - _, err = clone.Parse(cloneText4) - if err != nil { - t.Fatal(err) - } - // Verify that the clone is self-consistent. - for k, v := range clone.tmpl { - if k == clone.name && v.tmpl[k] != clone { - t.Error("clone does not contain root") - } - if v != v.tmpl[v.name] { - t.Errorf("clone does not contain self for %q", k) - } - } - // Execute root. - var b bytes.Buffer - err = root.ExecuteTemplate(&b, "a", 0) - if err != nil { - t.Fatal(err) - } - if b.String() != "broot" { - t.Errorf("expected %q got %q", "broot", b.String()) - } - // Execute copy. - b.Reset() - err = clone.ExecuteTemplate(&b, "a", 0) - if err != nil { - t.Fatal(err) - } - if b.String() != "bclone" { - t.Errorf("expected %q got %q", "bclone", b.String()) - } -} - -func TestAddParseTree(t *testing.T) { - // Create some templates. - root, err := New("root").Parse(cloneText1) - if err != nil { - t.Fatal(err) - } - _, err = root.Parse(cloneText2) - if err != nil { - t.Fatal(err) - } - // Add a new parse tree. 
- tree, err := parse.Parse("cloneText3", cloneText3, "", "", nil, builtins) - if err != nil { - t.Fatal(err) - } - added, err := root.AddParseTree("c", tree["c"]) - // Execute. - var b bytes.Buffer - err = added.ExecuteTemplate(&b, "a", 0) - if err != nil { - t.Fatal(err) - } - if b.String() != "broot" { - t.Errorf("expected %q got %q", "broot", b.String()) - } -} - -// Issue 7032 -func TestAddParseTreeToUnparsedTemplate(t *testing.T) { - master := "{{define \"master\"}}{{end}}" - tmpl := New("master") - tree, err := parse.Parse("master", master, "", "", nil) - if err != nil { - t.Fatalf("unexpected parse err: %v", err) - } - masterTree := tree["master"] - tmpl.AddParseTree("master", masterTree) // used to panic -} - -func TestRedefinition(t *testing.T) { - var tmpl *Template - var err error - if tmpl, err = New("tmpl1").Parse(`{{define "test"}}foo{{end}}`); err != nil { - t.Fatalf("parse 1: %v", err) - } - if _, err = tmpl.Parse(`{{define "test"}}bar{{end}}`); err == nil { - t.Fatal("expected error") - } - if !strings.Contains(err.Error(), "redefinition") { - t.Fatalf("expected redefinition error; got %v", err) - } - if _, err = tmpl.New("tmpl2").Parse(`{{define "test"}}bar{{end}}`); err == nil { - t.Fatal("expected error") - } - if !strings.Contains(err.Error(), "redefinition") { - t.Fatalf("expected redefinition error; got %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/parse/lex.go b/Godeps/_workspace/src/github.com/alecthomas/template/parse/lex.go deleted file mode 100644 index 55f1c051e..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/parse/lex.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package parse - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -// item represents a token or text string returned from the scanner. -type item struct { - typ itemType // The type of this item. - pos Pos // The starting position, in bytes, of this item in the input string. - val string // The value of this item. -} - -func (i item) String() string { - switch { - case i.typ == itemEOF: - return "EOF" - case i.typ == itemError: - return i.val - case i.typ > itemKeyword: - return fmt.Sprintf("<%s>", i.val) - case len(i.val) > 10: - return fmt.Sprintf("%.10q...", i.val) - } - return fmt.Sprintf("%q", i.val) -} - -// itemType identifies the type of lex items. -type itemType int - -const ( - itemError itemType = iota // error occurred; value is text of error - itemBool // boolean constant - itemChar // printable ASCII character; grab bag for comma etc. - itemCharConstant // character constant - itemComplex // complex constant (1+2i); imaginary is just a number - itemColonEquals // colon-equals (':=') introducing a declaration - itemEOF - itemField // alphanumeric identifier starting with '.' - itemIdentifier // alphanumeric identifier not starting with '.' 
- itemLeftDelim // left action delimiter - itemLeftParen // '(' inside action - itemNumber // simple number, including imaginary - itemPipe // pipe symbol - itemRawString // raw quoted string (includes quotes) - itemRightDelim // right action delimiter - itemElideNewline // elide newline after right delim - itemRightParen // ')' inside action - itemSpace // run of spaces separating arguments - itemString // quoted string (includes quotes) - itemText // plain text - itemVariable // variable starting with '$', such as '$' or '$1' or '$hello' - // Keywords appear after all the rest. - itemKeyword // used only to delimit the keywords - itemDot // the cursor, spelled '.' - itemDefine // define keyword - itemElse // else keyword - itemEnd // end keyword - itemIf // if keyword - itemNil // the untyped nil constant, easiest to treat as a keyword - itemRange // range keyword - itemTemplate // template keyword - itemWith // with keyword -) - -var key = map[string]itemType{ - ".": itemDot, - "define": itemDefine, - "else": itemElse, - "end": itemEnd, - "if": itemIf, - "range": itemRange, - "nil": itemNil, - "template": itemTemplate, - "with": itemWith, -} - -const eof = -1 - -// stateFn represents the state of the scanner as a function that returns the next state. -type stateFn func(*lexer) stateFn - -// lexer holds the state of the scanner. -type lexer struct { - name string // the name of the input; used only for error reports - input string // the string being scanned - leftDelim string // start of action - rightDelim string // end of action - state stateFn // the next lexing function to enter - pos Pos // current position in the input - start Pos // start position of this item - width Pos // width of last rune read from input - lastPos Pos // position of most recent item returned by nextItem - items chan item // channel of scanned items - parenDepth int // nesting depth of ( ) exprs -} - -// next returns the next rune in the input. -func (l *lexer) next() rune { - if int(l.pos) >= len(l.input) { - l.width = 0 - return eof - } - r, w := utf8.DecodeRuneInString(l.input[l.pos:]) - l.width = Pos(w) - l.pos += l.width - return r -} - -// peek returns but does not consume the next rune in the input. -func (l *lexer) peek() rune { - r := l.next() - l.backup() - return r -} - -// backup steps back one rune. Can only be called once per call of next. -func (l *lexer) backup() { - l.pos -= l.width -} - -// emit passes an item back to the client. -func (l *lexer) emit(t itemType) { - l.items <- item{t, l.start, l.input[l.start:l.pos]} - l.start = l.pos -} - -// ignore skips over the pending input before this point. -func (l *lexer) ignore() { - l.start = l.pos -} - -// accept consumes the next rune if it's from the valid set. -func (l *lexer) accept(valid string) bool { - if strings.IndexRune(valid, l.next()) >= 0 { - return true - } - l.backup() - return false -} - -// acceptRun consumes a run of runes from the valid set. -func (l *lexer) acceptRun(valid string) { - for strings.IndexRune(valid, l.next()) >= 0 { - } - l.backup() -} - -// lineNumber reports which line we're on, based on the position of -// the previous item returned by nextItem. Doing it this way -// means we don't have to worry about peek double counting. -func (l *lexer) lineNumber() int { - return 1 + strings.Count(l.input[:l.lastPos], "\n") -} - -// errorf returns an error token and terminates the scan by passing -// back a nil pointer that will be the next state, terminating l.nextItem. 
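
That nil-return convention is the core of the state-function design described above: each stateFn consumes some input and hands control to the next state, and the run loop stops when a state returns nil. A compact editorial sketch (invented names, not part of the vendored file) of such a scan loop:

package main

import "fmt"

type scanner struct{ visited []string }

// stateFn mirrors the lexer's pattern: do some work, return the next state.
type stateFn func(*scanner) stateFn

func stateA(s *scanner) stateFn { s.visited = append(s.visited, "A"); return stateB }
func stateB(s *scanner) stateFn { s.visited = append(s.visited, "B"); return stateC }
func stateC(s *scanner) stateFn { s.visited = append(s.visited, "C"); return nil }

func main() {
	s := &scanner{}
	for state := stateFn(stateA); state != nil; {
		state = state(s)
	}
	fmt.Println(s.visited) // [A B C]
}
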
-func (l *lexer) errorf(format string, args ...interface{}) stateFn { - l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)} - return nil -} - -// nextItem returns the next item from the input. -func (l *lexer) nextItem() item { - item := <-l.items - l.lastPos = item.pos - return item -} - -// lex creates a new scanner for the input string. -func lex(name, input, left, right string) *lexer { - if left == "" { - left = leftDelim - } - if right == "" { - right = rightDelim - } - l := &lexer{ - name: name, - input: input, - leftDelim: left, - rightDelim: right, - items: make(chan item), - } - go l.run() - return l -} - -// run runs the state machine for the lexer. -func (l *lexer) run() { - for l.state = lexText; l.state != nil; { - l.state = l.state(l) - } -} - -// state functions - -const ( - leftDelim = "{{" - rightDelim = "}}" - leftComment = "/*" - rightComment = "*/" -) - -// lexText scans until an opening action delimiter, "{{". -func lexText(l *lexer) stateFn { - for { - if strings.HasPrefix(l.input[l.pos:], l.leftDelim) { - if l.pos > l.start { - l.emit(itemText) - } - return lexLeftDelim - } - if l.next() == eof { - break - } - } - // Correctly reached EOF. - if l.pos > l.start { - l.emit(itemText) - } - l.emit(itemEOF) - return nil -} - -// lexLeftDelim scans the left delimiter, which is known to be present. -func lexLeftDelim(l *lexer) stateFn { - l.pos += Pos(len(l.leftDelim)) - if strings.HasPrefix(l.input[l.pos:], leftComment) { - return lexComment - } - l.emit(itemLeftDelim) - l.parenDepth = 0 - return lexInsideAction -} - -// lexComment scans a comment. The left comment marker is known to be present. -func lexComment(l *lexer) stateFn { - l.pos += Pos(len(leftComment)) - i := strings.Index(l.input[l.pos:], rightComment) - if i < 0 { - return l.errorf("unclosed comment") - } - l.pos += Pos(i + len(rightComment)) - if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) { - return l.errorf("comment ends before closing delimiter") - - } - l.pos += Pos(len(l.rightDelim)) - l.ignore() - return lexText -} - -// lexRightDelim scans the right delimiter, which is known to be present. -func lexRightDelim(l *lexer) stateFn { - l.pos += Pos(len(l.rightDelim)) - l.emit(itemRightDelim) - if l.peek() == '\\' { - l.pos++ - l.emit(itemElideNewline) - } - return lexText -} - -// lexInsideAction scans the elements inside action delimiters. -func lexInsideAction(l *lexer) stateFn { - // Either number, quoted string, or identifier. - // Spaces separate arguments; runs of spaces turn into itemSpace. - // Pipe symbols separate and are emitted. - if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) { - if l.parenDepth == 0 { - return lexRightDelim - } - return l.errorf("unclosed left paren") - } - switch r := l.next(); { - case r == eof || isEndOfLine(r): - return l.errorf("unclosed action") - case isSpace(r): - return lexSpace - case r == ':': - if l.next() != '=' { - return l.errorf("expected :=") - } - l.emit(itemColonEquals) - case r == '|': - l.emit(itemPipe) - case r == '"': - return lexQuote - case r == '`': - return lexRawQuote - case r == '$': - return lexVariable - case r == '\'': - return lexChar - case r == '.': - // special look-ahead for ".field" so we don't break l.backup(). - if l.pos < Pos(len(l.input)) { - r := l.input[l.pos] - if r < '0' || '9' < r { - return lexField - } - } - fallthrough // '.' can start a number. 
- case r == '+' || r == '-' || ('0' <= r && r <= '9'): - l.backup() - return lexNumber - case isAlphaNumeric(r): - l.backup() - return lexIdentifier - case r == '(': - l.emit(itemLeftParen) - l.parenDepth++ - return lexInsideAction - case r == ')': - l.emit(itemRightParen) - l.parenDepth-- - if l.parenDepth < 0 { - return l.errorf("unexpected right paren %#U", r) - } - return lexInsideAction - case r <= unicode.MaxASCII && unicode.IsPrint(r): - l.emit(itemChar) - return lexInsideAction - default: - return l.errorf("unrecognized character in action: %#U", r) - } - return lexInsideAction -} - -// lexSpace scans a run of space characters. -// One space has already been seen. -func lexSpace(l *lexer) stateFn { - for isSpace(l.peek()) { - l.next() - } - l.emit(itemSpace) - return lexInsideAction -} - -// lexIdentifier scans an alphanumeric. -func lexIdentifier(l *lexer) stateFn { -Loop: - for { - switch r := l.next(); { - case isAlphaNumeric(r): - // absorb. - default: - l.backup() - word := l.input[l.start:l.pos] - if !l.atTerminator() { - return l.errorf("bad character %#U", r) - } - switch { - case key[word] > itemKeyword: - l.emit(key[word]) - case word[0] == '.': - l.emit(itemField) - case word == "true", word == "false": - l.emit(itemBool) - default: - l.emit(itemIdentifier) - } - break Loop - } - } - return lexInsideAction -} - -// lexField scans a field: .Alphanumeric. -// The . has been scanned. -func lexField(l *lexer) stateFn { - return lexFieldOrVariable(l, itemField) -} - -// lexVariable scans a Variable: $Alphanumeric. -// The $ has been scanned. -func lexVariable(l *lexer) stateFn { - if l.atTerminator() { // Nothing interesting follows -> "$". - l.emit(itemVariable) - return lexInsideAction - } - return lexFieldOrVariable(l, itemVariable) -} - -// lexVariable scans a field or variable: [.$]Alphanumeric. -// The . or $ has been scanned. -func lexFieldOrVariable(l *lexer, typ itemType) stateFn { - if l.atTerminator() { // Nothing interesting follows -> "." or "$". - if typ == itemVariable { - l.emit(itemVariable) - } else { - l.emit(itemDot) - } - return lexInsideAction - } - var r rune - for { - r = l.next() - if !isAlphaNumeric(r) { - l.backup() - break - } - } - if !l.atTerminator() { - return l.errorf("bad character %#U", r) - } - l.emit(typ) - return lexInsideAction -} - -// atTerminator reports whether the input is at valid termination character to -// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases -// like "$x+2" not being acceptable without a space, in case we decide one -// day to implement arithmetic. -func (l *lexer) atTerminator() bool { - r := l.peek() - if isSpace(r) || isEndOfLine(r) { - return true - } - switch r { - case eof, '.', ',', '|', ':', ')', '(': - return true - } - // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will - // succeed but should fail) but only in extremely rare cases caused by willfully - // bad choice of delimiter. - if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r { - return true - } - return false -} - -// lexChar scans a character constant. The initial quote is already -// scanned. Syntax checking is done by the parser. 
-func lexChar(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - case eof, '\n': - return l.errorf("unterminated character constant") - case '\'': - break Loop - } - } - l.emit(itemCharConstant) - return lexInsideAction -} - -// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This -// isn't a perfect number scanner - for instance it accepts "." and "0x0.2" -// and "089" - but when it's wrong the input is invalid and the parser (via -// strconv) will notice. -func lexNumber(l *lexer) stateFn { - if !l.scanNumber() { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - if sign := l.peek(); sign == '+' || sign == '-' { - // Complex: 1+2i. No spaces, must end in 'i'. - if !l.scanNumber() || l.input[l.pos-1] != 'i' { - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - l.emit(itemComplex) - } else { - l.emit(itemNumber) - } - return lexInsideAction -} - -func (l *lexer) scanNumber() bool { - // Optional leading sign. - l.accept("+-") - // Is it hex? - digits := "0123456789" - if l.accept("0") && l.accept("xX") { - digits = "0123456789abcdefABCDEF" - } - l.acceptRun(digits) - if l.accept(".") { - l.acceptRun(digits) - } - if l.accept("eE") { - l.accept("+-") - l.acceptRun("0123456789") - } - // Is it imaginary? - l.accept("i") - // Next thing mustn't be alphanumeric. - if isAlphaNumeric(l.peek()) { - l.next() - return false - } - return true -} - -// lexQuote scans a quoted string. -func lexQuote(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != eof && r != '\n' { - break - } - fallthrough - case eof, '\n': - return l.errorf("unterminated quoted string") - case '"': - break Loop - } - } - l.emit(itemString) - return lexInsideAction -} - -// lexRawQuote scans a raw quoted string. -func lexRawQuote(l *lexer) stateFn { -Loop: - for { - switch l.next() { - case eof, '\n': - return l.errorf("unterminated raw quoted string") - case '`': - break Loop - } - } - l.emit(itemRawString) - return lexInsideAction -} - -// isSpace reports whether r is a space character. -func isSpace(r rune) bool { - return r == ' ' || r == '\t' -} - -// isEndOfLine reports whether r is an end-of-line character. -func isEndOfLine(r rune) bool { - return r == '\r' || r == '\n' -} - -// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore. -func isAlphaNumeric(r rune) bool { - return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r) -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/parse/lex_test.go b/Godeps/_workspace/src/github.com/alecthomas/template/parse/lex_test.go deleted file mode 100644 index 3b921076a..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/parse/lex_test.go +++ /dev/null @@ -1,468 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package parse - -import ( - "fmt" - "testing" -) - -// Make the types prettyprint. 
-var itemName = map[itemType]string{ - itemError: "error", - itemBool: "bool", - itemChar: "char", - itemCharConstant: "charconst", - itemComplex: "complex", - itemColonEquals: ":=", - itemEOF: "EOF", - itemField: "field", - itemIdentifier: "identifier", - itemLeftDelim: "left delim", - itemLeftParen: "(", - itemNumber: "number", - itemPipe: "pipe", - itemRawString: "raw string", - itemRightDelim: "right delim", - itemElideNewline: "elide newline", - itemRightParen: ")", - itemSpace: "space", - itemString: "string", - itemVariable: "variable", - - // keywords - itemDot: ".", - itemDefine: "define", - itemElse: "else", - itemIf: "if", - itemEnd: "end", - itemNil: "nil", - itemRange: "range", - itemTemplate: "template", - itemWith: "with", -} - -func (i itemType) String() string { - s := itemName[i] - if s == "" { - return fmt.Sprintf("item%d", int(i)) - } - return s -} - -type lexTest struct { - name string - input string - items []item -} - -var ( - tEOF = item{itemEOF, 0, ""} - tFor = item{itemIdentifier, 0, "for"} - tLeft = item{itemLeftDelim, 0, "{{"} - tLpar = item{itemLeftParen, 0, "("} - tPipe = item{itemPipe, 0, "|"} - tQuote = item{itemString, 0, `"abc \n\t\" "`} - tRange = item{itemRange, 0, "range"} - tRight = item{itemRightDelim, 0, "}}"} - tElideNewline = item{itemElideNewline, 0, "\\"} - tRpar = item{itemRightParen, 0, ")"} - tSpace = item{itemSpace, 0, " "} - raw = "`" + `abc\n\t\" ` + "`" - tRawQuote = item{itemRawString, 0, raw} -) - -var lexTests = []lexTest{ - {"empty", "", []item{tEOF}}, - {"spaces", " \t\n", []item{{itemText, 0, " \t\n"}, tEOF}}, - {"text", `now is the time`, []item{{itemText, 0, "now is the time"}, tEOF}}, - {"elide newline", "{{}}\\", []item{tLeft, tRight, tElideNewline, tEOF}}, - {"text with comment", "hello-{{/* this is a comment */}}-world", []item{ - {itemText, 0, "hello-"}, - {itemText, 0, "-world"}, - tEOF, - }}, - {"punctuation", "{{,@% }}", []item{ - tLeft, - {itemChar, 0, ","}, - {itemChar, 0, "@"}, - {itemChar, 0, "%"}, - tSpace, - tRight, - tEOF, - }}, - {"parens", "{{((3))}}", []item{ - tLeft, - tLpar, - tLpar, - {itemNumber, 0, "3"}, - tRpar, - tRpar, - tRight, - tEOF, - }}, - {"empty action", `{{}}`, []item{tLeft, tRight, tEOF}}, - {"for", `{{for}}`, []item{tLeft, tFor, tRight, tEOF}}, - {"quote", `{{"abc \n\t\" "}}`, []item{tLeft, tQuote, tRight, tEOF}}, - {"raw quote", "{{" + raw + "}}", []item{tLeft, tRawQuote, tRight, tEOF}}, - {"numbers", "{{1 02 0x14 -7.2i 1e3 +1.2e-4 4.2i 1+2i}}", []item{ - tLeft, - {itemNumber, 0, "1"}, - tSpace, - {itemNumber, 0, "02"}, - tSpace, - {itemNumber, 0, "0x14"}, - tSpace, - {itemNumber, 0, "-7.2i"}, - tSpace, - {itemNumber, 0, "1e3"}, - tSpace, - {itemNumber, 0, "+1.2e-4"}, - tSpace, - {itemNumber, 0, "4.2i"}, - tSpace, - {itemComplex, 0, "1+2i"}, - tRight, - tEOF, - }}, - {"characters", `{{'a' '\n' '\'' '\\' '\u00FF' '\xFF' '本'}}`, []item{ - tLeft, - {itemCharConstant, 0, `'a'`}, - tSpace, - {itemCharConstant, 0, `'\n'`}, - tSpace, - {itemCharConstant, 0, `'\''`}, - tSpace, - {itemCharConstant, 0, `'\\'`}, - tSpace, - {itemCharConstant, 0, `'\u00FF'`}, - tSpace, - {itemCharConstant, 0, `'\xFF'`}, - tSpace, - {itemCharConstant, 0, `'本'`}, - tRight, - tEOF, - }}, - {"bools", "{{true false}}", []item{ - tLeft, - {itemBool, 0, "true"}, - tSpace, - {itemBool, 0, "false"}, - tRight, - tEOF, - }}, - {"dot", "{{.}}", []item{ - tLeft, - {itemDot, 0, "."}, - tRight, - tEOF, - }}, - {"nil", "{{nil}}", []item{ - tLeft, - {itemNil, 0, "nil"}, - tRight, - tEOF, - }}, - {"dots", "{{.x . 
.2 .x.y.z}}", []item{ - tLeft, - {itemField, 0, ".x"}, - tSpace, - {itemDot, 0, "."}, - tSpace, - {itemNumber, 0, ".2"}, - tSpace, - {itemField, 0, ".x"}, - {itemField, 0, ".y"}, - {itemField, 0, ".z"}, - tRight, - tEOF, - }}, - {"keywords", "{{range if else end with}}", []item{ - tLeft, - {itemRange, 0, "range"}, - tSpace, - {itemIf, 0, "if"}, - tSpace, - {itemElse, 0, "else"}, - tSpace, - {itemEnd, 0, "end"}, - tSpace, - {itemWith, 0, "with"}, - tRight, - tEOF, - }}, - {"variables", "{{$c := printf $ $hello $23 $ $var.Field .Method}}", []item{ - tLeft, - {itemVariable, 0, "$c"}, - tSpace, - {itemColonEquals, 0, ":="}, - tSpace, - {itemIdentifier, 0, "printf"}, - tSpace, - {itemVariable, 0, "$"}, - tSpace, - {itemVariable, 0, "$hello"}, - tSpace, - {itemVariable, 0, "$23"}, - tSpace, - {itemVariable, 0, "$"}, - tSpace, - {itemVariable, 0, "$var"}, - {itemField, 0, ".Field"}, - tSpace, - {itemField, 0, ".Method"}, - tRight, - tEOF, - }}, - {"variable invocation", "{{$x 23}}", []item{ - tLeft, - {itemVariable, 0, "$x"}, - tSpace, - {itemNumber, 0, "23"}, - tRight, - tEOF, - }}, - {"pipeline", `intro {{echo hi 1.2 |noargs|args 1 "hi"}} outro`, []item{ - {itemText, 0, "intro "}, - tLeft, - {itemIdentifier, 0, "echo"}, - tSpace, - {itemIdentifier, 0, "hi"}, - tSpace, - {itemNumber, 0, "1.2"}, - tSpace, - tPipe, - {itemIdentifier, 0, "noargs"}, - tPipe, - {itemIdentifier, 0, "args"}, - tSpace, - {itemNumber, 0, "1"}, - tSpace, - {itemString, 0, `"hi"`}, - tRight, - {itemText, 0, " outro"}, - tEOF, - }}, - {"declaration", "{{$v := 3}}", []item{ - tLeft, - {itemVariable, 0, "$v"}, - tSpace, - {itemColonEquals, 0, ":="}, - tSpace, - {itemNumber, 0, "3"}, - tRight, - tEOF, - }}, - {"2 declarations", "{{$v , $w := 3}}", []item{ - tLeft, - {itemVariable, 0, "$v"}, - tSpace, - {itemChar, 0, ","}, - tSpace, - {itemVariable, 0, "$w"}, - tSpace, - {itemColonEquals, 0, ":="}, - tSpace, - {itemNumber, 0, "3"}, - tRight, - tEOF, - }}, - {"field of parenthesized expression", "{{(.X).Y}}", []item{ - tLeft, - tLpar, - {itemField, 0, ".X"}, - tRpar, - {itemField, 0, ".Y"}, - tRight, - tEOF, - }}, - // errors - {"badchar", "#{{\x01}}", []item{ - {itemText, 0, "#"}, - tLeft, - {itemError, 0, "unrecognized character in action: U+0001"}, - }}, - {"unclosed action", "{{\n}}", []item{ - tLeft, - {itemError, 0, "unclosed action"}, - }}, - {"EOF in action", "{{range", []item{ - tLeft, - tRange, - {itemError, 0, "unclosed action"}, - }}, - {"unclosed quote", "{{\"\n\"}}", []item{ - tLeft, - {itemError, 0, "unterminated quoted string"}, - }}, - {"unclosed raw quote", "{{`xx\n`}}", []item{ - tLeft, - {itemError, 0, "unterminated raw quoted string"}, - }}, - {"unclosed char constant", "{{'\n}}", []item{ - tLeft, - {itemError, 0, "unterminated character constant"}, - }}, - {"bad number", "{{3k}}", []item{ - tLeft, - {itemError, 0, `bad number syntax: "3k"`}, - }}, - {"unclosed paren", "{{(3}}", []item{ - tLeft, - tLpar, - {itemNumber, 0, "3"}, - {itemError, 0, `unclosed left paren`}, - }}, - {"extra right paren", "{{3)}}", []item{ - tLeft, - {itemNumber, 0, "3"}, - tRpar, - {itemError, 0, `unexpected right paren U+0029 ')'`}, - }}, - - // Fixed bugs - // Many elements in an action blew the lookahead until - // we made lexInsideAction not loop. 
- {"long pipeline deadlock", "{{|||||}}", []item{ - tLeft, - tPipe, - tPipe, - tPipe, - tPipe, - tPipe, - tRight, - tEOF, - }}, - {"text with bad comment", "hello-{{/*/}}-world", []item{ - {itemText, 0, "hello-"}, - {itemError, 0, `unclosed comment`}, - }}, - {"text with comment close separted from delim", "hello-{{/* */ }}-world", []item{ - {itemText, 0, "hello-"}, - {itemError, 0, `comment ends before closing delimiter`}, - }}, - // This one is an error that we can't catch because it breaks templates with - // minimized JavaScript. Should have fixed it before Go 1.1. - {"unmatched right delimiter", "hello-{.}}-world", []item{ - {itemText, 0, "hello-{.}}-world"}, - tEOF, - }}, -} - -// collect gathers the emitted items into a slice. -func collect(t *lexTest, left, right string) (items []item) { - l := lex(t.name, t.input, left, right) - for { - item := l.nextItem() - items = append(items, item) - if item.typ == itemEOF || item.typ == itemError { - break - } - } - return -} - -func equal(i1, i2 []item, checkPos bool) bool { - if len(i1) != len(i2) { - return false - } - for k := range i1 { - if i1[k].typ != i2[k].typ { - return false - } - if i1[k].val != i2[k].val { - return false - } - if checkPos && i1[k].pos != i2[k].pos { - return false - } - } - return true -} - -func TestLex(t *testing.T) { - for _, test := range lexTests { - items := collect(&test, "", "") - if !equal(items, test.items, false) { - t.Errorf("%s: got\n\t%+v\nexpected\n\t%v", test.name, items, test.items) - } - } -} - -// Some easy cases from above, but with delimiters $$ and @@ -var lexDelimTests = []lexTest{ - {"punctuation", "$$,@%{{}}@@", []item{ - tLeftDelim, - {itemChar, 0, ","}, - {itemChar, 0, "@"}, - {itemChar, 0, "%"}, - {itemChar, 0, "{"}, - {itemChar, 0, "{"}, - {itemChar, 0, "}"}, - {itemChar, 0, "}"}, - tRightDelim, - tEOF, - }}, - {"empty action", `$$@@`, []item{tLeftDelim, tRightDelim, tEOF}}, - {"for", `$$for@@`, []item{tLeftDelim, tFor, tRightDelim, tEOF}}, - {"quote", `$$"abc \n\t\" "@@`, []item{tLeftDelim, tQuote, tRightDelim, tEOF}}, - {"raw quote", "$$" + raw + "@@", []item{tLeftDelim, tRawQuote, tRightDelim, tEOF}}, -} - -var ( - tLeftDelim = item{itemLeftDelim, 0, "$$"} - tRightDelim = item{itemRightDelim, 0, "@@"} -) - -func TestDelims(t *testing.T) { - for _, test := range lexDelimTests { - items := collect(&test, "$$", "@@") - if !equal(items, test.items, false) { - t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items) - } - } -} - -var lexPosTests = []lexTest{ - {"empty", "", []item{tEOF}}, - {"punctuation", "{{,@%#}}", []item{ - {itemLeftDelim, 0, "{{"}, - {itemChar, 2, ","}, - {itemChar, 3, "@"}, - {itemChar, 4, "%"}, - {itemChar, 5, "#"}, - {itemRightDelim, 6, "}}"}, - {itemEOF, 8, ""}, - }}, - {"sample", "0123{{hello}}xyz", []item{ - {itemText, 0, "0123"}, - {itemLeftDelim, 4, "{{"}, - {itemIdentifier, 6, "hello"}, - {itemRightDelim, 11, "}}"}, - {itemText, 13, "xyz"}, - {itemEOF, 16, ""}, - }}, -} - -// The other tests don't check position, to make the test cases easier to construct. -// This one does. -func TestPos(t *testing.T) { - for _, test := range lexPosTests { - items := collect(&test, "", "") - if !equal(items, test.items, true) { - t.Errorf("%s: got\n\t%v\nexpected\n\t%v", test.name, items, test.items) - if len(items) == len(test.items) { - // Detailed print; avoid item.String() to expose the position value. 
- for i := range items { - if !equal(items[i:i+1], test.items[i:i+1], true) { - i1 := items[i] - i2 := test.items[i] - t.Errorf("\t#%d: got {%v %d %q} expected {%v %d %q}", i, i1.typ, i1.pos, i1.val, i2.typ, i2.pos, i2.val) - } - } - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/parse/node.go b/Godeps/_workspace/src/github.com/alecthomas/template/parse/node.go deleted file mode 100644 index 55c37f6db..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/parse/node.go +++ /dev/null @@ -1,834 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Parse nodes. - -package parse - -import ( - "bytes" - "fmt" - "strconv" - "strings" -) - -var textFormat = "%s" // Changed to "%q" in tests for better error messages. - -// A Node is an element in the parse tree. The interface is trivial. -// The interface contains an unexported method so that only -// types local to this package can satisfy it. -type Node interface { - Type() NodeType - String() string - // Copy does a deep copy of the Node and all its components. - // To avoid type assertions, some XxxNodes also have specialized - // CopyXxx methods that return *XxxNode. - Copy() Node - Position() Pos // byte position of start of node in full original input string - // tree returns the containing *Tree. - // It is unexported so all implementations of Node are in this package. - tree() *Tree -} - -// NodeType identifies the type of a parse tree node. -type NodeType int - -// Pos represents a byte position in the original input text from which -// this template was parsed. -type Pos int - -func (p Pos) Position() Pos { - return p -} - -// Type returns itself and provides an easy default implementation -// for embedding in a Node. Embedded in all non-trivial Nodes. -func (t NodeType) Type() NodeType { - return t -} - -const ( - NodeText NodeType = iota // Plain text. - NodeAction // A non-control action such as a field evaluation. - NodeBool // A boolean constant. - NodeChain // A sequence of field accesses. - NodeCommand // An element of a pipeline. - NodeDot // The cursor, dot. - nodeElse // An else action. Not added to tree. - nodeEnd // An end action. Not added to tree. - NodeField // A field or method name. - NodeIdentifier // An identifier; always a function name. - NodeIf // An if action. - NodeList // A list of Nodes. - NodeNil // An untyped nil constant. - NodeNumber // A numerical constant. - NodePipe // A pipeline of commands. - NodeRange // A range action. - NodeString // A string constant. - NodeTemplate // A template invocation action. - NodeVariable // A $ variable. - NodeWith // A with action. -) - -// Nodes. - -// ListNode holds a sequence of nodes. -type ListNode struct { - NodeType - Pos - tr *Tree - Nodes []Node // The element nodes in lexical order. 
-} - -func (t *Tree) newList(pos Pos) *ListNode { - return &ListNode{tr: t, NodeType: NodeList, Pos: pos} -} - -func (l *ListNode) append(n Node) { - l.Nodes = append(l.Nodes, n) -} - -func (l *ListNode) tree() *Tree { - return l.tr -} - -func (l *ListNode) String() string { - b := new(bytes.Buffer) - for _, n := range l.Nodes { - fmt.Fprint(b, n) - } - return b.String() -} - -func (l *ListNode) CopyList() *ListNode { - if l == nil { - return l - } - n := l.tr.newList(l.Pos) - for _, elem := range l.Nodes { - n.append(elem.Copy()) - } - return n -} - -func (l *ListNode) Copy() Node { - return l.CopyList() -} - -// TextNode holds plain text. -type TextNode struct { - NodeType - Pos - tr *Tree - Text []byte // The text; may span newlines. -} - -func (t *Tree) newText(pos Pos, text string) *TextNode { - return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)} -} - -func (t *TextNode) String() string { - return fmt.Sprintf(textFormat, t.Text) -} - -func (t *TextNode) tree() *Tree { - return t.tr -} - -func (t *TextNode) Copy() Node { - return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)} -} - -// PipeNode holds a pipeline with optional declaration -type PipeNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Decl []*VariableNode // Variable declarations in lexical order. - Cmds []*CommandNode // The commands in lexical order. -} - -func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode { - return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl} -} - -func (p *PipeNode) append(command *CommandNode) { - p.Cmds = append(p.Cmds, command) -} - -func (p *PipeNode) String() string { - s := "" - if len(p.Decl) > 0 { - for i, v := range p.Decl { - if i > 0 { - s += ", " - } - s += v.String() - } - s += " := " - } - for i, c := range p.Cmds { - if i > 0 { - s += " | " - } - s += c.String() - } - return s -} - -func (p *PipeNode) tree() *Tree { - return p.tr -} - -func (p *PipeNode) CopyPipe() *PipeNode { - if p == nil { - return p - } - var decl []*VariableNode - for _, d := range p.Decl { - decl = append(decl, d.Copy().(*VariableNode)) - } - n := p.tr.newPipeline(p.Pos, p.Line, decl) - for _, c := range p.Cmds { - n.append(c.Copy().(*CommandNode)) - } - return n -} - -func (p *PipeNode) Copy() Node { - return p.CopyPipe() -} - -// ActionNode holds an action (something bounded by delimiters). -// Control actions have their own nodes; ActionNode represents simple -// ones such as field evaluations and parenthesized pipelines. -type ActionNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Pipe *PipeNode // The pipeline in the action. -} - -func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode { - return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe} -} - -func (a *ActionNode) String() string { - return fmt.Sprintf("{{%s}}", a.Pipe) - -} - -func (a *ActionNode) tree() *Tree { - return a.tr -} - -func (a *ActionNode) Copy() Node { - return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) - -} - -// CommandNode holds a command (a pipeline inside an evaluating action). -type CommandNode struct { - NodeType - Pos - tr *Tree - Args []Node // Arguments in lexical order: Identifier, field, or constant. 
-} - -func (t *Tree) newCommand(pos Pos) *CommandNode { - return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos} -} - -func (c *CommandNode) append(arg Node) { - c.Args = append(c.Args, arg) -} - -func (c *CommandNode) String() string { - s := "" - for i, arg := range c.Args { - if i > 0 { - s += " " - } - if arg, ok := arg.(*PipeNode); ok { - s += "(" + arg.String() + ")" - continue - } - s += arg.String() - } - return s -} - -func (c *CommandNode) tree() *Tree { - return c.tr -} - -func (c *CommandNode) Copy() Node { - if c == nil { - return c - } - n := c.tr.newCommand(c.Pos) - for _, c := range c.Args { - n.append(c.Copy()) - } - return n -} - -// IdentifierNode holds an identifier. -type IdentifierNode struct { - NodeType - Pos - tr *Tree - Ident string // The identifier's name. -} - -// NewIdentifier returns a new IdentifierNode with the given identifier name. -func NewIdentifier(ident string) *IdentifierNode { - return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} -} - -// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. -// Chained for convenience. -// TODO: fix one day? -func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { - i.Pos = pos - return i -} - -// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. -// Chained for convenience. -// TODO: fix one day? -func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { - i.tr = t - return i -} - -func (i *IdentifierNode) String() string { - return i.Ident -} - -func (i *IdentifierNode) tree() *Tree { - return i.tr -} - -func (i *IdentifierNode) Copy() Node { - return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos) -} - -// VariableNode holds a list of variable names, possibly with chained field -// accesses. The dollar sign is part of the (first) name. -type VariableNode struct { - NodeType - Pos - tr *Tree - Ident []string // Variable name and fields in lexical order. -} - -func (t *Tree) newVariable(pos Pos, ident string) *VariableNode { - return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")} -} - -func (v *VariableNode) String() string { - s := "" - for i, id := range v.Ident { - if i > 0 { - s += "." - } - s += id - } - return s -} - -func (v *VariableNode) tree() *Tree { - return v.tr -} - -func (v *VariableNode) Copy() Node { - return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)} -} - -// DotNode holds the special identifier '.'. -type DotNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newDot(pos Pos) *DotNode { - return &DotNode{tr: t, NodeType: NodeDot, Pos: pos} -} - -func (d *DotNode) Type() NodeType { - // Override method on embedded NodeType for API compatibility. - // TODO: Not really a problem; could change API without effect but - // api tool complains. - return NodeDot -} - -func (d *DotNode) String() string { - return "." -} - -func (d *DotNode) tree() *Tree { - return d.tr -} - -func (d *DotNode) Copy() Node { - return d.tr.newDot(d.Pos) -} - -// NilNode holds the special identifier 'nil' representing an untyped nil constant. -type NilNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newNil(pos Pos) *NilNode { - return &NilNode{tr: t, NodeType: NodeNil, Pos: pos} -} - -func (n *NilNode) Type() NodeType { - // Override method on embedded NodeType for API compatibility. - // TODO: Not really a problem; could change API without effect but - // api tool complains. 
- return NodeNil -} - -func (n *NilNode) String() string { - return "nil" -} - -func (n *NilNode) tree() *Tree { - return n.tr -} - -func (n *NilNode) Copy() Node { - return n.tr.newNil(n.Pos) -} - -// FieldNode holds a field (identifier starting with '.'). -// The names may be chained ('.x.y'). -// The period is dropped from each ident. -type FieldNode struct { - NodeType - Pos - tr *Tree - Ident []string // The identifiers in lexical order. -} - -func (t *Tree) newField(pos Pos, ident string) *FieldNode { - return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period -} - -func (f *FieldNode) String() string { - s := "" - for _, id := range f.Ident { - s += "." + id - } - return s -} - -func (f *FieldNode) tree() *Tree { - return f.tr -} - -func (f *FieldNode) Copy() Node { - return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)} -} - -// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.'). -// The names may be chained ('.x.y'). -// The periods are dropped from each ident. -type ChainNode struct { - NodeType - Pos - tr *Tree - Node Node - Field []string // The identifiers in lexical order. -} - -func (t *Tree) newChain(pos Pos, node Node) *ChainNode { - return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node} -} - -// Add adds the named field (which should start with a period) to the end of the chain. -func (c *ChainNode) Add(field string) { - if len(field) == 0 || field[0] != '.' { - panic("no dot in field") - } - field = field[1:] // Remove leading dot. - if field == "" { - panic("empty field") - } - c.Field = append(c.Field, field) -} - -func (c *ChainNode) String() string { - s := c.Node.String() - if _, ok := c.Node.(*PipeNode); ok { - s = "(" + s + ")" - } - for _, field := range c.Field { - s += "." + field - } - return s -} - -func (c *ChainNode) tree() *Tree { - return c.tr -} - -func (c *ChainNode) Copy() Node { - return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)} -} - -// BoolNode holds a boolean constant. -type BoolNode struct { - NodeType - Pos - tr *Tree - True bool // The value of the boolean constant. -} - -func (t *Tree) newBool(pos Pos, true bool) *BoolNode { - return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true} -} - -func (b *BoolNode) String() string { - if b.True { - return "true" - } - return "false" -} - -func (b *BoolNode) tree() *Tree { - return b.tr -} - -func (b *BoolNode) Copy() Node { - return b.tr.newBool(b.Pos, b.True) -} - -// NumberNode holds a number: signed or unsigned integer, float, or complex. -// The value is parsed and stored under all the types that can represent the value. -// This simulates in a small amount of code the behavior of Go's ideal constants. -type NumberNode struct { - NodeType - Pos - tr *Tree - IsInt bool // Number has an integral value. - IsUint bool // Number has an unsigned integral value. - IsFloat bool // Number has a floating-point value. - IsComplex bool // Number is complex. - Int64 int64 // The signed integer value. - Uint64 uint64 // The unsigned integer value. - Float64 float64 // The floating-point value. - Complex128 complex128 // The complex value. - Text string // The original textual representation from the input. 
-} - -func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) { - n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text} - switch typ { - case itemCharConstant: - rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0]) - if err != nil { - return nil, err - } - if tail != "'" { - return nil, fmt.Errorf("malformed character constant: %s", text) - } - n.Int64 = int64(rune) - n.IsInt = true - n.Uint64 = uint64(rune) - n.IsUint = true - n.Float64 = float64(rune) // odd but those are the rules. - n.IsFloat = true - return n, nil - case itemComplex: - // fmt.Sscan can parse the pair, so let it do the work. - if _, err := fmt.Sscan(text, &n.Complex128); err != nil { - return nil, err - } - n.IsComplex = true - n.simplifyComplex() - return n, nil - } - // Imaginary constants can only be complex unless they are zero. - if len(text) > 0 && text[len(text)-1] == 'i' { - f, err := strconv.ParseFloat(text[:len(text)-1], 64) - if err == nil { - n.IsComplex = true - n.Complex128 = complex(0, f) - n.simplifyComplex() - return n, nil - } - } - // Do integer test first so we get 0x123 etc. - u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below. - if err == nil { - n.IsUint = true - n.Uint64 = u - } - i, err := strconv.ParseInt(text, 0, 64) - if err == nil { - n.IsInt = true - n.Int64 = i - if i == 0 { - n.IsUint = true // in case of -0. - n.Uint64 = u - } - } - // If an integer extraction succeeded, promote the float. - if n.IsInt { - n.IsFloat = true - n.Float64 = float64(n.Int64) - } else if n.IsUint { - n.IsFloat = true - n.Float64 = float64(n.Uint64) - } else { - f, err := strconv.ParseFloat(text, 64) - if err == nil { - n.IsFloat = true - n.Float64 = f - // If a floating-point extraction succeeded, extract the int if needed. - if !n.IsInt && float64(int64(f)) == f { - n.IsInt = true - n.Int64 = int64(f) - } - if !n.IsUint && float64(uint64(f)) == f { - n.IsUint = true - n.Uint64 = uint64(f) - } - } - } - if !n.IsInt && !n.IsUint && !n.IsFloat { - return nil, fmt.Errorf("illegal number syntax: %q", text) - } - return n, nil -} - -// simplifyComplex pulls out any other types that are represented by the complex number. -// These all require that the imaginary part be zero. -func (n *NumberNode) simplifyComplex() { - n.IsFloat = imag(n.Complex128) == 0 - if n.IsFloat { - n.Float64 = real(n.Complex128) - n.IsInt = float64(int64(n.Float64)) == n.Float64 - if n.IsInt { - n.Int64 = int64(n.Float64) - } - n.IsUint = float64(uint64(n.Float64)) == n.Float64 - if n.IsUint { - n.Uint64 = uint64(n.Float64) - } - } -} - -func (n *NumberNode) String() string { - return n.Text -} - -func (n *NumberNode) tree() *Tree { - return n.tr -} - -func (n *NumberNode) Copy() Node { - nn := new(NumberNode) - *nn = *n // Easy, fast, correct. - return nn -} - -// StringNode holds a string constant. The value has been "unquoted". -type StringNode struct { - NodeType - Pos - tr *Tree - Quoted string // The original text of the string, with quotes. - Text string // The string, after quote processing. -} - -func (t *Tree) newString(pos Pos, orig, text string) *StringNode { - return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text} -} - -func (s *StringNode) String() string { - return s.Quoted -} - -func (s *StringNode) tree() *Tree { - return s.tr -} - -func (s *StringNode) Copy() Node { - return s.tr.newString(s.Pos, s.Quoted, s.Text) -} - -// endNode represents an {{end}} action. -// It does not appear in the final parse tree. 
-type endNode struct { - NodeType - Pos - tr *Tree -} - -func (t *Tree) newEnd(pos Pos) *endNode { - return &endNode{tr: t, NodeType: nodeEnd, Pos: pos} -} - -func (e *endNode) String() string { - return "{{end}}" -} - -func (e *endNode) tree() *Tree { - return e.tr -} - -func (e *endNode) Copy() Node { - return e.tr.newEnd(e.Pos) -} - -// elseNode represents an {{else}} action. Does not appear in the final tree. -type elseNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) -} - -func (t *Tree) newElse(pos Pos, line int) *elseNode { - return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line} -} - -func (e *elseNode) Type() NodeType { - return nodeElse -} - -func (e *elseNode) String() string { - return "{{else}}" -} - -func (e *elseNode) tree() *Tree { - return e.tr -} - -func (e *elseNode) Copy() Node { - return e.tr.newElse(e.Pos, e.Line) -} - -// BranchNode is the common representation of if, range, and with. -type BranchNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Pipe *PipeNode // The pipeline to be evaluated. - List *ListNode // What to execute if the value is non-empty. - ElseList *ListNode // What to execute if the value is empty (nil if absent). -} - -func (b *BranchNode) String() string { - name := "" - switch b.NodeType { - case NodeIf: - name = "if" - case NodeRange: - name = "range" - case NodeWith: - name = "with" - default: - panic("unknown branch type") - } - if b.ElseList != nil { - return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList) - } - return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List) -} - -func (b *BranchNode) tree() *Tree { - return b.tr -} - -func (b *BranchNode) Copy() Node { - switch b.NodeType { - case NodeIf: - return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - case NodeRange: - return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - case NodeWith: - return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList) - default: - panic("unknown branch type") - } -} - -// IfNode represents an {{if}} action and its commands. -type IfNode struct { - BranchNode -} - -func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode { - return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (i *IfNode) Copy() Node { - return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList()) -} - -// RangeNode represents a {{range}} action and its commands. -type RangeNode struct { - BranchNode -} - -func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode { - return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (r *RangeNode) Copy() Node { - return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList()) -} - -// WithNode represents a {{with}} action and its commands. 
-type WithNode struct { - BranchNode -} - -func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode { - return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}} -} - -func (w *WithNode) Copy() Node { - return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList()) -} - -// TemplateNode represents a {{template}} action. -type TemplateNode struct { - NodeType - Pos - tr *Tree - Line int // The line number in the input (deprecated; kept for compatibility) - Name string // The name of the template (unquoted). - Pipe *PipeNode // The command to evaluate as dot for the template. -} - -func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode { - return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe} -} - -func (t *TemplateNode) String() string { - if t.Pipe == nil { - return fmt.Sprintf("{{template %q}}", t.Name) - } - return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe) -} - -func (t *TemplateNode) tree() *Tree { - return t.tr -} - -func (t *TemplateNode) Copy() Node { - return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe()) -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/parse/parse.go b/Godeps/_workspace/src/github.com/alecthomas/template/parse/parse.go deleted file mode 100644 index 0d77ade87..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/parse/parse.go +++ /dev/null @@ -1,700 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package parse builds parse trees for templates as defined by text/template -// and html/template. Clients should use those packages to construct templates -// rather than this one, which provides shared internal data structures not -// intended for general use. -package parse - -import ( - "bytes" - "fmt" - "runtime" - "strconv" - "strings" -) - -// Tree is the representation of a single parsed template. -type Tree struct { - Name string // name of the template represented by the tree. - ParseName string // name of the top-level template during parsing, for error messages. - Root *ListNode // top-level root of the tree. - text string // text parsed to create the template (or its parent) - // Parsing only; cleared after parse. - funcs []map[string]interface{} - lex *lexer - token [3]item // three-token lookahead for parser. - peekCount int - vars []string // variables defined at the moment. -} - -// Copy returns a copy of the Tree. Any parsing state is discarded. -func (t *Tree) Copy() *Tree { - if t == nil { - return nil - } - return &Tree{ - Name: t.Name, - ParseName: t.ParseName, - Root: t.Root.CopyList(), - text: t.text, - } -} - -// Parse returns a map from template name to parse.Tree, created by parsing the -// templates described in the argument string. The top-level template will be -// given the specified name. If an error is encountered, parsing stops and an -// empty map is returned with the error. -func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) { - treeSet = make(map[string]*Tree) - t := New(name) - t.text = text - _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...) - return -} - -// next returns the next token. 
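A minimal sketch of driving the package-level Parse entry point shown above; the template name "page" and the template text are made-up examples, not taken from this patch:

package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	// Parse builds a map from template name to *parse.Tree; the top-level
	// template is stored in the map under the name passed in.
	treeSet, err := parse.Parse("page", "Hello, {{.Name}}!", "", "")
	if err != nil {
		panic(err)
	}
	// Root.String() re-renders the parse tree in template syntax.
	fmt.Println(treeSet["page"].Root.String()) // Hello, {{.Name}}!
}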
-func (t *Tree) next() item { - if t.peekCount > 0 { - t.peekCount-- - } else { - t.token[0] = t.lex.nextItem() - } - return t.token[t.peekCount] -} - -// backup backs the input stream up one token. -func (t *Tree) backup() { - t.peekCount++ -} - -// backup2 backs the input stream up two tokens. -// The zeroth token is already there. -func (t *Tree) backup2(t1 item) { - t.token[1] = t1 - t.peekCount = 2 -} - -// backup3 backs the input stream up three tokens -// The zeroth token is already there. -func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back. - t.token[1] = t1 - t.token[2] = t2 - t.peekCount = 3 -} - -// peek returns but does not consume the next token. -func (t *Tree) peek() item { - if t.peekCount > 0 { - return t.token[t.peekCount-1] - } - t.peekCount = 1 - t.token[0] = t.lex.nextItem() - return t.token[0] -} - -// nextNonSpace returns the next non-space token. -func (t *Tree) nextNonSpace() (token item) { - for { - token = t.next() - if token.typ != itemSpace { - break - } - } - return token -} - -// peekNonSpace returns but does not consume the next non-space token. -func (t *Tree) peekNonSpace() (token item) { - for { - token = t.next() - if token.typ != itemSpace { - break - } - } - t.backup() - return token -} - -// Parsing. - -// New allocates a new parse tree with the given name. -func New(name string, funcs ...map[string]interface{}) *Tree { - return &Tree{ - Name: name, - funcs: funcs, - } -} - -// ErrorContext returns a textual representation of the location of the node in the input text. -// The receiver is only used when the node does not have a pointer to the tree inside, -// which can occur in old code. -func (t *Tree) ErrorContext(n Node) (location, context string) { - pos := int(n.Position()) - tree := n.tree() - if tree == nil { - tree = t - } - text := tree.text[:pos] - byteNum := strings.LastIndex(text, "\n") - if byteNum == -1 { - byteNum = pos // On first line. - } else { - byteNum++ // After the newline. - byteNum = pos - byteNum - } - lineNum := 1 + strings.Count(text, "\n") - context = n.String() - if len(context) > 20 { - context = fmt.Sprintf("%.20s...", context) - } - return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context -} - -// errorf formats the error and terminates processing. -func (t *Tree) errorf(format string, args ...interface{}) { - t.Root = nil - format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format) - panic(fmt.Errorf(format, args...)) -} - -// error terminates processing. -func (t *Tree) error(err error) { - t.errorf("%s", err) -} - -// expect consumes the next token and guarantees it has the required type. -func (t *Tree) expect(expected itemType, context string) item { - token := t.nextNonSpace() - if token.typ != expected { - t.unexpected(token, context) - } - return token -} - -// expectOneOf consumes the next token and guarantees it has one of the required types. -func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item { - token := t.nextNonSpace() - if token.typ != expected1 && token.typ != expected2 { - t.unexpected(token, context) - } - return token -} - -// unexpected complains about the token and terminates processing. -func (t *Tree) unexpected(token item, context string) { - t.errorf("unexpected %s in %s", token, context) -} - -// recover is the handler that turns panics into returns from the top level of Parse. 
-func (t *Tree) recover(errp *error) { - e := recover() - if e != nil { - if _, ok := e.(runtime.Error); ok { - panic(e) - } - if t != nil { - t.stopParse() - } - *errp = e.(error) - } - return -} - -// startParse initializes the parser, using the lexer. -func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) { - t.Root = nil - t.lex = lex - t.vars = []string{"$"} - t.funcs = funcs -} - -// stopParse terminates parsing. -func (t *Tree) stopParse() { - t.lex = nil - t.vars = nil - t.funcs = nil -} - -// Parse parses the template definition string to construct a representation of -// the template for execution. If either action delimiter string is empty, the -// default ("{{" or "}}") is used. Embedded template definitions are added to -// the treeSet map. -func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) { - defer t.recover(&err) - t.ParseName = t.Name - t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim)) - t.text = text - t.parse(treeSet) - t.add(treeSet) - t.stopParse() - return t, nil -} - -// add adds tree to the treeSet. -func (t *Tree) add(treeSet map[string]*Tree) { - tree := treeSet[t.Name] - if tree == nil || IsEmptyTree(tree.Root) { - treeSet[t.Name] = t - return - } - if !IsEmptyTree(t.Root) { - t.errorf("template: multiple definition of template %q", t.Name) - } -} - -// IsEmptyTree reports whether this tree (node) is empty of everything but space. -func IsEmptyTree(n Node) bool { - switch n := n.(type) { - case nil: - return true - case *ActionNode: - case *IfNode: - case *ListNode: - for _, node := range n.Nodes { - if !IsEmptyTree(node) { - return false - } - } - return true - case *RangeNode: - case *TemplateNode: - case *TextNode: - return len(bytes.TrimSpace(n.Text)) == 0 - case *WithNode: - default: - panic("unknown node: " + n.String()) - } - return false -} - -// parse is the top-level parser for a template, essentially the same -// as itemList except it also parses {{define}} actions. -// It runs to EOF. -func (t *Tree) parse(treeSet map[string]*Tree) (next Node) { - t.Root = t.newList(t.peek().pos) - for t.peek().typ != itemEOF { - if t.peek().typ == itemLeftDelim { - delim := t.next() - if t.nextNonSpace().typ == itemDefine { - newT := New("definition") // name will be updated once we know it. - newT.text = t.text - newT.ParseName = t.ParseName - newT.startParse(t.funcs, t.lex) - newT.parseDefinition(treeSet) - continue - } - t.backup2(delim) - } - n := t.textOrAction() - if n.Type() == nodeEnd { - t.errorf("unexpected %s", n) - } - t.Root.append(n) - } - return nil -} - -// parseDefinition parses a {{define}} ... {{end}} template definition and -// installs the definition in the treeSet map. The "define" keyword has already -// been scanned. -func (t *Tree) parseDefinition(treeSet map[string]*Tree) { - const context = "define clause" - name := t.expectOneOf(itemString, itemRawString, context) - var err error - t.Name, err = strconv.Unquote(name.val) - if err != nil { - t.error(err) - } - t.expect(itemRightDelim, context) - var end Node - t.Root, end = t.itemList() - if end.Type() != nodeEnd { - t.errorf("unexpected %s in %s", end, context) - } - t.add(treeSet) - t.stopParse() -} - -// itemList: -// textOrAction* -// Terminates at {{end}} or {{else}}, returned separately. 
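A short sketch of how parseDefinition and add populate a caller-supplied treeSet, as described above; the names "root" and "greet" are hypothetical:

package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	treeSet := make(map[string]*parse.Tree)
	root := parse.New("root")
	// {{define}} blocks are parsed into their own trees and installed in
	// treeSet; the surrounding text stays in the "root" tree.
	if _, err := root.Parse(`{{define "greet"}}hi{{end}}body`, "", "", treeSet); err != nil {
		panic(err)
	}
	fmt.Println(len(treeSet))                             // 2: "root" and "greet"
	fmt.Println(parse.IsEmptyTree(treeSet["greet"].Root)) // false: it holds the text "hi"
}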
-func (t *Tree) itemList() (list *ListNode, next Node) { - list = t.newList(t.peekNonSpace().pos) - for t.peekNonSpace().typ != itemEOF { - n := t.textOrAction() - switch n.Type() { - case nodeEnd, nodeElse: - return list, n - } - list.append(n) - } - t.errorf("unexpected EOF") - return -} - -// textOrAction: -// text | action -func (t *Tree) textOrAction() Node { - switch token := t.nextNonSpace(); token.typ { - case itemElideNewline: - return t.elideNewline() - case itemText: - return t.newText(token.pos, token.val) - case itemLeftDelim: - return t.action() - default: - t.unexpected(token, "input") - } - return nil -} - -// elideNewline: -// Remove newlines trailing rightDelim if \\ is present. -func (t *Tree) elideNewline() Node { - token := t.peek() - if token.typ != itemText { - t.unexpected(token, "input") - return nil - } - - t.next() - stripped := strings.TrimLeft(token.val, "\n\r") - diff := len(token.val) - len(stripped) - if diff > 0 { - // This is a bit nasty. We mutate the token in-place to remove - // preceding newlines. - token.pos += Pos(diff) - token.val = stripped - } - return t.newText(token.pos, token.val) -} - -// Action: -// control -// command ("|" command)* -// Left delim is past. Now get actions. -// First word could be a keyword such as range. -func (t *Tree) action() (n Node) { - switch token := t.nextNonSpace(); token.typ { - case itemElse: - return t.elseControl() - case itemEnd: - return t.endControl() - case itemIf: - return t.ifControl() - case itemRange: - return t.rangeControl() - case itemTemplate: - return t.templateControl() - case itemWith: - return t.withControl() - } - t.backup() - // Do not pop variables; they persist until "end". - return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command")) -} - -// Pipeline: -// declarations? command ('|' command)* -func (t *Tree) pipeline(context string) (pipe *PipeNode) { - var decl []*VariableNode - pos := t.peekNonSpace().pos - // Are there declarations? - for { - if v := t.peekNonSpace(); v.typ == itemVariable { - t.next() - // Since space is a token, we need 3-token look-ahead here in the worst case: - // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an - // argument variable rather than a declaration. So remember the token - // adjacent to the variable so we can push it back if necessary. 
- tokenAfterVariable := t.peek() - if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") { - t.nextNonSpace() - variable := t.newVariable(v.pos, v.val) - decl = append(decl, variable) - t.vars = append(t.vars, v.val) - if next.typ == itemChar && next.val == "," { - if context == "range" && len(decl) < 2 { - continue - } - t.errorf("too many declarations in %s", context) - } - } else if tokenAfterVariable.typ == itemSpace { - t.backup3(v, tokenAfterVariable) - } else { - t.backup2(v) - } - } - break - } - pipe = t.newPipeline(pos, t.lex.lineNumber(), decl) - for { - switch token := t.nextNonSpace(); token.typ { - case itemRightDelim, itemRightParen: - if len(pipe.Cmds) == 0 { - t.errorf("missing value for %s", context) - } - if token.typ == itemRightParen { - t.backup() - } - return - case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier, - itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen: - t.backup() - pipe.append(t.command()) - default: - t.unexpected(token, context) - } - } -} - -func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) { - defer t.popVars(len(t.vars)) - line = t.lex.lineNumber() - pipe = t.pipeline(context) - var next Node - list, next = t.itemList() - switch next.Type() { - case nodeEnd: //done - case nodeElse: - if allowElseIf { - // Special case for "else if". If the "else" is followed immediately by an "if", - // the elseControl will have left the "if" token pending. Treat - // {{if a}}_{{else if b}}_{{end}} - // as - // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}. - // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}} - // is assumed. This technique works even for long if-else-if chains. - // TODO: Should we allow else-if in with and range? - if t.peek().typ == itemIf { - t.next() // Consume the "if" token. - elseList = t.newList(next.Position()) - elseList.append(t.ifControl()) - // Do not consume the next item - only one {{end}} required. - break - } - } - elseList, next = t.itemList() - if next.Type() != nodeEnd { - t.errorf("expected end; found %s", next) - } - } - return pipe.Position(), line, pipe, list, elseList -} - -// If: -// {{if pipeline}} itemList {{end}} -// {{if pipeline}} itemList {{else}} itemList {{end}} -// If keyword is past. -func (t *Tree) ifControl() Node { - return t.newIf(t.parseControl(true, "if")) -} - -// Range: -// {{range pipeline}} itemList {{end}} -// {{range pipeline}} itemList {{else}} itemList {{end}} -// Range keyword is past. -func (t *Tree) rangeControl() Node { - return t.newRange(t.parseControl(false, "range")) -} - -// With: -// {{with pipeline}} itemList {{end}} -// {{with pipeline}} itemList {{else}} itemList {{end}} -// If keyword is past. -func (t *Tree) withControl() Node { - return t.newWith(t.parseControl(false, "with")) -} - -// End: -// {{end}} -// End keyword is past. -func (t *Tree) endControl() Node { - return t.newEnd(t.expect(itemRightDelim, "end").pos) -} - -// Else: -// {{else}} -// Else keyword is past. -func (t *Tree) elseControl() Node { - // Special case for "else if". - peek := t.peekNonSpace() - if peek.typ == itemIf { - // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". - return t.newElse(peek.pos, t.lex.lineNumber()) - } - return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber()) -} - -// Template: -// {{template stringValue pipeline}} -// Template keyword is past. 
The name must be something that can evaluate -// to a string. -func (t *Tree) templateControl() Node { - var name string - token := t.nextNonSpace() - switch token.typ { - case itemString, itemRawString: - s, err := strconv.Unquote(token.val) - if err != nil { - t.error(err) - } - name = s - default: - t.unexpected(token, "template invocation") - } - var pipe *PipeNode - if t.nextNonSpace().typ != itemRightDelim { - t.backup() - // Do not pop variables; they persist until "end". - pipe = t.pipeline("template") - } - return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe) -} - -// command: -// operand (space operand)* -// space-separated arguments up to a pipeline character or right delimiter. -// we consume the pipe character but leave the right delim to terminate the action. -func (t *Tree) command() *CommandNode { - cmd := t.newCommand(t.peekNonSpace().pos) - for { - t.peekNonSpace() // skip leading spaces. - operand := t.operand() - if operand != nil { - cmd.append(operand) - } - switch token := t.next(); token.typ { - case itemSpace: - continue - case itemError: - t.errorf("%s", token.val) - case itemRightDelim, itemRightParen: - t.backup() - case itemPipe: - default: - t.errorf("unexpected %s in operand; missing space?", token) - } - break - } - if len(cmd.Args) == 0 { - t.errorf("empty command") - } - return cmd -} - -// operand: -// term .Field* -// An operand is a space-separated component of a command, -// a term possibly followed by field accesses. -// A nil return means the next item is not an operand. -func (t *Tree) operand() Node { - node := t.term() - if node == nil { - return nil - } - if t.peek().typ == itemField { - chain := t.newChain(t.peek().pos, node) - for t.peek().typ == itemField { - chain.Add(t.next().val) - } - // Compatibility with original API: If the term is of type NodeField - // or NodeVariable, just put more fields on the original. - // Otherwise, keep the Chain node. - // TODO: Switch to Chains always when we can. - switch node.Type() { - case NodeField: - node = t.newField(chain.Position(), chain.String()) - case NodeVariable: - node = t.newVariable(chain.Position(), chain.String()) - default: - node = chain - } - } - return node -} - -// term: -// literal (number, string, nil, boolean) -// function (identifier) -// . -// .Field -// $ -// '(' pipeline ')' -// A term is a simple "expression". -// A nil return means the next item is not a term. 
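A small sketch of the "else if" rewrite that parseControl performs: printing the parsed tree shows the nested form it stores. The template text here is a made-up example:

package main

import (
	"fmt"

	"github.com/alecthomas/template/parse"
)

func main() {
	// {{else if}} is parsed as an {{else}} whose body is a nested {{if}},
	// so only one {{end}} is required in the source template.
	treeSet, err := parse.Parse("t", "{{if .X}}a{{else if .Y}}b{{end}}", "", "")
	if err != nil {
		panic(err)
	}
	fmt.Println(treeSet["t"].Root.String())
	// {{if .X}}a{{else}}{{if .Y}}b{{end}}{{end}}
}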
-func (t *Tree) term() Node { - switch token := t.nextNonSpace(); token.typ { - case itemError: - t.errorf("%s", token.val) - case itemIdentifier: - if !t.hasFunction(token.val) { - t.errorf("function %q not defined", token.val) - } - return NewIdentifier(token.val).SetTree(t).SetPos(token.pos) - case itemDot: - return t.newDot(token.pos) - case itemNil: - return t.newNil(token.pos) - case itemVariable: - return t.useVar(token.pos, token.val) - case itemField: - return t.newField(token.pos, token.val) - case itemBool: - return t.newBool(token.pos, token.val == "true") - case itemCharConstant, itemComplex, itemNumber: - number, err := t.newNumber(token.pos, token.val, token.typ) - if err != nil { - t.error(err) - } - return number - case itemLeftParen: - pipe := t.pipeline("parenthesized pipeline") - if token := t.next(); token.typ != itemRightParen { - t.errorf("unclosed right paren: unexpected %s", token) - } - return pipe - case itemString, itemRawString: - s, err := strconv.Unquote(token.val) - if err != nil { - t.error(err) - } - return t.newString(token.pos, token.val, s) - } - t.backup() - return nil -} - -// hasFunction reports if a function name exists in the Tree's maps. -func (t *Tree) hasFunction(name string) bool { - for _, funcMap := range t.funcs { - if funcMap == nil { - continue - } - if funcMap[name] != nil { - return true - } - } - return false -} - -// popVars trims the variable list to the specified length -func (t *Tree) popVars(n int) { - t.vars = t.vars[:n] -} - -// useVar returns a node for a variable reference. It errors if the -// variable is not defined. -func (t *Tree) useVar(pos Pos, name string) Node { - v := t.newVariable(pos, name) - for _, varName := range t.vars { - if varName == v.Ident[0] { - return v - } - } - t.errorf("undefined variable %q", v.Ident[0]) - return nil -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/parse/parse_test.go b/Godeps/_workspace/src/github.com/alecthomas/template/parse/parse_test.go deleted file mode 100644 index c73640fb8..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/parse/parse_test.go +++ /dev/null @@ -1,426 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package parse - -import ( - "flag" - "fmt" - "strings" - "testing" -) - -var debug = flag.Bool("debug", false, "show the errors produced by the main tests") - -type numberTest struct { - text string - isInt bool - isUint bool - isFloat bool - isComplex bool - int64 - uint64 - float64 - complex128 -} - -var numberTests = []numberTest{ - // basics - {"0", true, true, true, false, 0, 0, 0, 0}, - {"-0", true, true, true, false, 0, 0, 0, 0}, // check that -0 is a uint. 
- {"73", true, true, true, false, 73, 73, 73, 0}, - {"073", true, true, true, false, 073, 073, 073, 0}, - {"0x73", true, true, true, false, 0x73, 0x73, 0x73, 0}, - {"-73", true, false, true, false, -73, 0, -73, 0}, - {"+73", true, false, true, false, 73, 0, 73, 0}, - {"100", true, true, true, false, 100, 100, 100, 0}, - {"1e9", true, true, true, false, 1e9, 1e9, 1e9, 0}, - {"-1e9", true, false, true, false, -1e9, 0, -1e9, 0}, - {"-1.2", false, false, true, false, 0, 0, -1.2, 0}, - {"1e19", false, true, true, false, 0, 1e19, 1e19, 0}, - {"-1e19", false, false, true, false, 0, 0, -1e19, 0}, - {"4i", false, false, false, true, 0, 0, 0, 4i}, - {"-1.2+4.2i", false, false, false, true, 0, 0, 0, -1.2 + 4.2i}, - {"073i", false, false, false, true, 0, 0, 0, 73i}, // not octal! - // complex with 0 imaginary are float (and maybe integer) - {"0i", true, true, true, true, 0, 0, 0, 0}, - {"-1.2+0i", false, false, true, true, 0, 0, -1.2, -1.2}, - {"-12+0i", true, false, true, true, -12, 0, -12, -12}, - {"13+0i", true, true, true, true, 13, 13, 13, 13}, - // funny bases - {"0123", true, true, true, false, 0123, 0123, 0123, 0}, - {"-0x0", true, true, true, false, 0, 0, 0, 0}, - {"0xdeadbeef", true, true, true, false, 0xdeadbeef, 0xdeadbeef, 0xdeadbeef, 0}, - // character constants - {`'a'`, true, true, true, false, 'a', 'a', 'a', 0}, - {`'\n'`, true, true, true, false, '\n', '\n', '\n', 0}, - {`'\\'`, true, true, true, false, '\\', '\\', '\\', 0}, - {`'\''`, true, true, true, false, '\'', '\'', '\'', 0}, - {`'\xFF'`, true, true, true, false, 0xFF, 0xFF, 0xFF, 0}, - {`'パ'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0}, - {`'\u30d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0}, - {`'\U000030d1'`, true, true, true, false, 0x30d1, 0x30d1, 0x30d1, 0}, - // some broken syntax - {text: "+-2"}, - {text: "0x123."}, - {text: "1e."}, - {text: "0xi."}, - {text: "1+2."}, - {text: "'x"}, - {text: "'xx'"}, - // Issue 8622 - 0xe parsed as floating point. Very embarrassing. - {"0xef", true, true, true, false, 0xef, 0xef, 0xef, 0}, -} - -func TestNumberParse(t *testing.T) { - for _, test := range numberTests { - // If fmt.Sscan thinks it's complex, it's complex. We can't trust the output - // because imaginary comes out as a number. 
- var c complex128 - typ := itemNumber - var tree *Tree - if test.text[0] == '\'' { - typ = itemCharConstant - } else { - _, err := fmt.Sscan(test.text, &c) - if err == nil { - typ = itemComplex - } - } - n, err := tree.newNumber(0, test.text, typ) - ok := test.isInt || test.isUint || test.isFloat || test.isComplex - if ok && err != nil { - t.Errorf("unexpected error for %q: %s", test.text, err) - continue - } - if !ok && err == nil { - t.Errorf("expected error for %q", test.text) - continue - } - if !ok { - if *debug { - fmt.Printf("%s\n\t%s\n", test.text, err) - } - continue - } - if n.IsComplex != test.isComplex { - t.Errorf("complex incorrect for %q; should be %t", test.text, test.isComplex) - } - if test.isInt { - if !n.IsInt { - t.Errorf("expected integer for %q", test.text) - } - if n.Int64 != test.int64 { - t.Errorf("int64 for %q should be %d Is %d", test.text, test.int64, n.Int64) - } - } else if n.IsInt { - t.Errorf("did not expect integer for %q", test.text) - } - if test.isUint { - if !n.IsUint { - t.Errorf("expected unsigned integer for %q", test.text) - } - if n.Uint64 != test.uint64 { - t.Errorf("uint64 for %q should be %d Is %d", test.text, test.uint64, n.Uint64) - } - } else if n.IsUint { - t.Errorf("did not expect unsigned integer for %q", test.text) - } - if test.isFloat { - if !n.IsFloat { - t.Errorf("expected float for %q", test.text) - } - if n.Float64 != test.float64 { - t.Errorf("float64 for %q should be %g Is %g", test.text, test.float64, n.Float64) - } - } else if n.IsFloat { - t.Errorf("did not expect float for %q", test.text) - } - if test.isComplex { - if !n.IsComplex { - t.Errorf("expected complex for %q", test.text) - } - if n.Complex128 != test.complex128 { - t.Errorf("complex128 for %q should be %g Is %g", test.text, test.complex128, n.Complex128) - } - } else if n.IsComplex { - t.Errorf("did not expect complex for %q", test.text) - } - } -} - -type parseTest struct { - name string - input string - ok bool - result string // what the user would see in an error message. 
-} - -const ( - noError = true - hasError = false -) - -var parseTests = []parseTest{ - {"empty", "", noError, - ``}, - {"comment", "{{/*\n\n\n*/}}", noError, - ``}, - {"spaces", " \t\n", noError, - `" \t\n"`}, - {"text", "some text", noError, - `"some text"`}, - {"emptyAction", "{{}}", hasError, - `{{}}`}, - {"field", "{{.X}}", noError, - `{{.X}}`}, - {"simple command", "{{printf}}", noError, - `{{printf}}`}, - {"$ invocation", "{{$}}", noError, - "{{$}}"}, - {"variable invocation", "{{with $x := 3}}{{$x 23}}{{end}}", noError, - "{{with $x := 3}}{{$x 23}}{{end}}"}, - {"variable with fields", "{{$.I}}", noError, - "{{$.I}}"}, - {"multi-word command", "{{printf `%d` 23}}", noError, - "{{printf `%d` 23}}"}, - {"pipeline", "{{.X|.Y}}", noError, - `{{.X | .Y}}`}, - {"pipeline with decl", "{{$x := .X|.Y}}", noError, - `{{$x := .X | .Y}}`}, - {"nested pipeline", "{{.X (.Y .Z) (.A | .B .C) (.E)}}", noError, - `{{.X (.Y .Z) (.A | .B .C) (.E)}}`}, - {"field applied to parentheses", "{{(.Y .Z).Field}}", noError, - `{{(.Y .Z).Field}}`}, - {"simple if", "{{if .X}}hello{{end}}", noError, - `{{if .X}}"hello"{{end}}`}, - {"if with else", "{{if .X}}true{{else}}false{{end}}", noError, - `{{if .X}}"true"{{else}}"false"{{end}}`}, - {"if with else if", "{{if .X}}true{{else if .Y}}false{{end}}", noError, - `{{if .X}}"true"{{else}}{{if .Y}}"false"{{end}}{{end}}`}, - {"if else chain", "+{{if .X}}X{{else if .Y}}Y{{else if .Z}}Z{{end}}+", noError, - `"+"{{if .X}}"X"{{else}}{{if .Y}}"Y"{{else}}{{if .Z}}"Z"{{end}}{{end}}{{end}}"+"`}, - {"simple range", "{{range .X}}hello{{end}}", noError, - `{{range .X}}"hello"{{end}}`}, - {"chained field range", "{{range .X.Y.Z}}hello{{end}}", noError, - `{{range .X.Y.Z}}"hello"{{end}}`}, - {"nested range", "{{range .X}}hello{{range .Y}}goodbye{{end}}{{end}}", noError, - `{{range .X}}"hello"{{range .Y}}"goodbye"{{end}}{{end}}`}, - {"range with else", "{{range .X}}true{{else}}false{{end}}", noError, - `{{range .X}}"true"{{else}}"false"{{end}}`}, - {"range over pipeline", "{{range .X|.M}}true{{else}}false{{end}}", noError, - `{{range .X | .M}}"true"{{else}}"false"{{end}}`}, - {"range []int", "{{range .SI}}{{.}}{{end}}", noError, - `{{range .SI}}{{.}}{{end}}`}, - {"range 1 var", "{{range $x := .SI}}{{.}}{{end}}", noError, - `{{range $x := .SI}}{{.}}{{end}}`}, - {"range 2 vars", "{{range $x, $y := .SI}}{{.}}{{end}}", noError, - `{{range $x, $y := .SI}}{{.}}{{end}}`}, - {"constants", "{{range .SI 1 -3.2i true false 'a' nil}}{{end}}", noError, - `{{range .SI 1 -3.2i true false 'a' nil}}{{end}}`}, - {"template", "{{template `x`}}", noError, - `{{template "x"}}`}, - {"template with arg", "{{template `x` .Y}}", noError, - `{{template "x" .Y}}`}, - {"with", "{{with .X}}hello{{end}}", noError, - `{{with .X}}"hello"{{end}}`}, - {"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError, - `{{with .X}}"hello"{{else}}"goodbye"{{end}}`}, - {"elide newline", "{{true}}\\\n ", noError, - `{{true}}" "`}, - // Errors. 
- {"unclosed action", "hello{{range", hasError, ""}, - {"unmatched end", "{{end}}", hasError, ""}, - {"missing end", "hello{{range .x}}", hasError, ""}, - {"missing end after else", "hello{{range .x}}{{else}}", hasError, ""}, - {"undefined function", "hello{{undefined}}", hasError, ""}, - {"undefined variable", "{{$x}}", hasError, ""}, - {"variable undefined after end", "{{with $x := 4}}{{end}}{{$x}}", hasError, ""}, - {"variable undefined in template", "{{template $v}}", hasError, ""}, - {"declare with field", "{{with $x.Y := 4}}{{end}}", hasError, ""}, - {"template with field ref", "{{template .X}}", hasError, ""}, - {"template with var", "{{template $v}}", hasError, ""}, - {"invalid punctuation", "{{printf 3, 4}}", hasError, ""}, - {"multidecl outside range", "{{with $v, $u := 3}}{{end}}", hasError, ""}, - {"too many decls in range", "{{range $u, $v, $w := 3}}{{end}}", hasError, ""}, - {"dot applied to parentheses", "{{printf (printf .).}}", hasError, ""}, - {"adjacent args", "{{printf 3`x`}}", hasError, ""}, - {"adjacent args with .", "{{printf `x`.}}", hasError, ""}, - {"extra end after if", "{{if .X}}a{{else if .Y}}b{{end}}{{end}}", hasError, ""}, - {"invalid newline elision", "{{true}}\\{{true}}", hasError, ""}, - // Equals (and other chars) do not assignments make (yet). - {"bug0a", "{{$x := 0}}{{$x}}", noError, "{{$x := 0}}{{$x}}"}, - {"bug0b", "{{$x = 1}}{{$x}}", hasError, ""}, - {"bug0c", "{{$x ! 2}}{{$x}}", hasError, ""}, - {"bug0d", "{{$x % 3}}{{$x}}", hasError, ""}, - // Check the parse fails for := rather than comma. - {"bug0e", "{{range $x := $y := 3}}{{end}}", hasError, ""}, - // Another bug: variable read must ignore following punctuation. - {"bug1a", "{{$x:=.}}{{$x!2}}", hasError, ""}, // ! is just illegal here. - {"bug1b", "{{$x:=.}}{{$x+2}}", hasError, ""}, // $x+2 should not parse as ($x) (+2). - {"bug1c", "{{$x:=.}}{{$x +2}}", noError, "{{$x := .}}{{$x +2}}"}, // It's OK with a space. 
-} - -var builtins = map[string]interface{}{ - "printf": fmt.Sprintf, -} - -func testParse(doCopy bool, t *testing.T) { - textFormat = "%q" - defer func() { textFormat = "%s" }() - for _, test := range parseTests { - tmpl, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree), builtins) - switch { - case err == nil && !test.ok: - t.Errorf("%q: expected error; got none", test.name) - continue - case err != nil && test.ok: - t.Errorf("%q: unexpected error: %v", test.name, err) - continue - case err != nil && !test.ok: - // expected error, got one - if *debug { - fmt.Printf("%s: %s\n\t%s\n", test.name, test.input, err) - } - continue - } - var result string - if doCopy { - result = tmpl.Root.Copy().String() - } else { - result = tmpl.Root.String() - } - if result != test.result { - t.Errorf("%s=(%q): got\n\t%v\nexpected\n\t%v", test.name, test.input, result, test.result) - } - } -} - -func TestParse(t *testing.T) { - testParse(false, t) -} - -// Same as TestParse, but we copy the node first -func TestParseCopy(t *testing.T) { - testParse(true, t) -} - -type isEmptyTest struct { - name string - input string - empty bool -} - -var isEmptyTests = []isEmptyTest{ - {"empty", ``, true}, - {"nonempty", `hello`, false}, - {"spaces only", " \t\n \t\n", true}, - {"definition", `{{define "x"}}something{{end}}`, true}, - {"definitions and space", "{{define `x`}}something{{end}}\n\n{{define `y`}}something{{end}}\n\n", true}, - {"definitions and text", "{{define `x`}}something{{end}}\nx\n{{define `y`}}something{{end}}\ny\n", false}, - {"definition and action", "{{define `x`}}something{{end}}{{if 3}}foo{{end}}", false}, -} - -func TestIsEmpty(t *testing.T) { - if !IsEmptyTree(nil) { - t.Errorf("nil tree is not empty") - } - for _, test := range isEmptyTests { - tree, err := New("root").Parse(test.input, "", "", make(map[string]*Tree), nil) - if err != nil { - t.Errorf("%q: unexpected error: %v", test.name, err) - continue - } - if empty := IsEmptyTree(tree.Root); empty != test.empty { - t.Errorf("%q: expected %t got %t", test.name, test.empty, empty) - } - } -} - -func TestErrorContextWithTreeCopy(t *testing.T) { - tree, err := New("root").Parse("{{if true}}{{end}}", "", "", make(map[string]*Tree), nil) - if err != nil { - t.Fatalf("unexpected tree parse failure: %v", err) - } - treeCopy := tree.Copy() - wantLocation, wantContext := tree.ErrorContext(tree.Root.Nodes[0]) - gotLocation, gotContext := treeCopy.ErrorContext(treeCopy.Root.Nodes[0]) - if wantLocation != gotLocation { - t.Errorf("wrong error location want %q got %q", wantLocation, gotLocation) - } - if wantContext != gotContext { - t.Errorf("wrong error location want %q got %q", wantContext, gotContext) - } -} - -// All failures, and the result is a string that must appear in the error message. -var errorTests = []parseTest{ - // Check line numbers are accurate. - {"unclosed1", - "line1\n{{", - hasError, `unclosed1:2: unexpected unclosed action in command`}, - {"unclosed2", - "line1\n{{define `x`}}line2\n{{", - hasError, `unclosed2:3: unexpected unclosed action in command`}, - // Specific errors. 
- {"function", - "{{foo}}", - hasError, `function "foo" not defined`}, - {"comment", - "{{/*}}", - hasError, `unclosed comment`}, - {"lparen", - "{{.X (1 2 3}}", - hasError, `unclosed left paren`}, - {"rparen", - "{{.X 1 2 3)}}", - hasError, `unexpected ")"`}, - {"space", - "{{`x`3}}", - hasError, `missing space?`}, - {"idchar", - "{{a#}}", - hasError, `'#'`}, - {"charconst", - "{{'a}}", - hasError, `unterminated character constant`}, - {"stringconst", - `{{"a}}`, - hasError, `unterminated quoted string`}, - {"rawstringconst", - "{{`a}}", - hasError, `unterminated raw quoted string`}, - {"number", - "{{0xi}}", - hasError, `number syntax`}, - {"multidefine", - "{{define `a`}}a{{end}}{{define `a`}}b{{end}}", - hasError, `multiple definition of template`}, - {"eof", - "{{range .X}}", - hasError, `unexpected EOF`}, - {"variable", - // Declare $x so it's defined, to avoid that error, and then check we don't parse a declaration. - "{{$x := 23}}{{with $x.y := 3}}{{$x 23}}{{end}}", - hasError, `unexpected ":="`}, - {"multidecl", - "{{$a,$b,$c := 23}}", - hasError, `too many declarations`}, - {"undefvar", - "{{$a}}", - hasError, `undefined variable`}, -} - -func TestErrors(t *testing.T) { - for _, test := range errorTests { - _, err := New(test.name).Parse(test.input, "", "", make(map[string]*Tree)) - if err == nil { - t.Errorf("%q: expected error", test.name) - continue - } - if !strings.Contains(err.Error(), test.result) { - t.Errorf("%q: error %q does not contain %q", test.name, err, test.result) - } - } -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/template.go b/Godeps/_workspace/src/github.com/alecthomas/template/template.go deleted file mode 100644 index 2eabfdd51..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/template.go +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package template - -import ( - "fmt" - "github.com/alecthomas/template/parse" - "reflect" -) - -// common holds the information shared by related templates. -type common struct { - tmpl map[string]*Template - // We use two maps, one for parsing and one for execution. - // This separation makes the API cleaner since it doesn't - // expose reflection to the client. - parseFuncs FuncMap - execFuncs map[string]reflect.Value -} - -// Template is the representation of a parsed template. The *parse.Tree -// field is exported only for use by html/template and should be treated -// as unexported by all other clients. -type Template struct { - name string - *parse.Tree - *common - leftDelim string - rightDelim string -} - -// New allocates a new template with the given name. -func New(name string) *Template { - return &Template{ - name: name, - } -} - -// Name returns the name of the template. -func (t *Template) Name() string { - return t.name -} - -// New allocates a new template associated with the given one and with the same -// delimiters. The association, which is transitive, allows one template to -// invoke another with a {{template}} action. 
-func (t *Template) New(name string) *Template { - t.init() - return &Template{ - name: name, - common: t.common, - leftDelim: t.leftDelim, - rightDelim: t.rightDelim, - } -} - -func (t *Template) init() { - if t.common == nil { - t.common = new(common) - t.tmpl = make(map[string]*Template) - t.parseFuncs = make(FuncMap) - t.execFuncs = make(map[string]reflect.Value) - } -} - -// Clone returns a duplicate of the template, including all associated -// templates. The actual representation is not copied, but the name space of -// associated templates is, so further calls to Parse in the copy will add -// templates to the copy but not to the original. Clone can be used to prepare -// common templates and use them with variant definitions for other templates -// by adding the variants after the clone is made. -func (t *Template) Clone() (*Template, error) { - nt := t.copy(nil) - nt.init() - nt.tmpl[t.name] = nt - for k, v := range t.tmpl { - if k == t.name { // Already installed. - continue - } - // The associated templates share nt's common structure. - tmpl := v.copy(nt.common) - nt.tmpl[k] = tmpl - } - for k, v := range t.parseFuncs { - nt.parseFuncs[k] = v - } - for k, v := range t.execFuncs { - nt.execFuncs[k] = v - } - return nt, nil -} - -// copy returns a shallow copy of t, with common set to the argument. -func (t *Template) copy(c *common) *Template { - nt := New(t.name) - nt.Tree = t.Tree - nt.common = c - nt.leftDelim = t.leftDelim - nt.rightDelim = t.rightDelim - return nt -} - -// AddParseTree creates a new template with the name and parse tree -// and associates it with t. -func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) { - if t.common != nil && t.tmpl[name] != nil { - return nil, fmt.Errorf("template: redefinition of template %q", name) - } - nt := t.New(name) - nt.Tree = tree - t.tmpl[name] = nt - return nt, nil -} - -// Templates returns a slice of the templates associated with t, including t -// itself. -func (t *Template) Templates() []*Template { - if t.common == nil { - return nil - } - // Return a slice so we don't expose the map. - m := make([]*Template, 0, len(t.tmpl)) - for _, v := range t.tmpl { - m = append(m, v) - } - return m -} - -// Delims sets the action delimiters to the specified strings, to be used in -// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template -// definitions will inherit the settings. An empty delimiter stands for the -// corresponding default: {{ or }}. -// The return value is the template, so calls can be chained. -func (t *Template) Delims(left, right string) *Template { - t.leftDelim = left - t.rightDelim = right - return t -} - -// Funcs adds the elements of the argument map to the template's function map. -// It panics if a value in the map is not a function with appropriate return -// type. However, it is legal to overwrite elements of the map. The return -// value is the template, so calls can be chained. -func (t *Template) Funcs(funcMap FuncMap) *Template { - t.init() - addValueFuncs(t.execFuncs, funcMap) - addFuncs(t.parseFuncs, funcMap) - return t -} - -// Lookup returns the template with the given name that is associated with t, -// or nil if there is no such template. -func (t *Template) Lookup(name string) *Template { - if t.common == nil { - return nil - } - return t.tmpl[name] -} - -// Parse parses a string into a template. Nested template definitions will be -// associated with the top-level template t. 
Parse may be called multiple times -// to parse definitions of templates to associate with t. It is an error if a -// resulting template is non-empty (contains content other than template -// definitions) and would replace a non-empty template with the same name. -// (In multiple calls to Parse with the same receiver template, only one call -// can contain text other than space, comments, and template definitions.) -func (t *Template) Parse(text string) (*Template, error) { - t.init() - trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins) - if err != nil { - return nil, err - } - // Add the newly parsed trees, including the one for t, into our common structure. - for name, tree := range trees { - // If the name we parsed is the name of this template, overwrite this template. - // The associate method checks it's not a redefinition. - tmpl := t - if name != t.name { - tmpl = t.New(name) - } - // Even if t == tmpl, we need to install it in the common.tmpl map. - if replace, err := t.associate(tmpl, tree); err != nil { - return nil, err - } else if replace { - tmpl.Tree = tree - } - tmpl.leftDelim = t.leftDelim - tmpl.rightDelim = t.rightDelim - } - return t, nil -} - -// associate installs the new template into the group of templates associated -// with t. It is an error to reuse a name except to overwrite an empty -// template. The two are already known to share the common structure. -// The boolean return value reports wither to store this tree as t.Tree. -func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) { - if new.common != t.common { - panic("internal error: associate not common") - } - name := new.name - if old := t.tmpl[name]; old != nil { - oldIsEmpty := parse.IsEmptyTree(old.Root) - newIsEmpty := parse.IsEmptyTree(tree.Root) - if newIsEmpty { - // Whether old is empty or not, new is empty; no reason to replace old. 
- return false, nil - } - if !oldIsEmpty { - return false, fmt.Errorf("template: redefinition of template %q", name) - } - } - t.tmpl[name] = new - return true, nil -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/testdata/file1.tmpl b/Godeps/_workspace/src/github.com/alecthomas/template/testdata/file1.tmpl deleted file mode 100644 index febf9d9f8..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/testdata/file1.tmpl +++ /dev/null @@ -1,2 +0,0 @@ -{{define "x"}}TEXT{{end}} -{{define "dotV"}}{{.V}}{{end}} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/testdata/file2.tmpl b/Godeps/_workspace/src/github.com/alecthomas/template/testdata/file2.tmpl deleted file mode 100644 index 39bf6fb9e..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/testdata/file2.tmpl +++ /dev/null @@ -1,2 +0,0 @@ -{{define "dot"}}{{.}}{{end}} -{{define "nested"}}{{template "dot" .}}{{end}} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/testdata/tmpl1.tmpl b/Godeps/_workspace/src/github.com/alecthomas/template/testdata/tmpl1.tmpl deleted file mode 100644 index b72b3a340..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/testdata/tmpl1.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -template1 -{{define "x"}}x{{end}} -{{template "y"}} diff --git a/Godeps/_workspace/src/github.com/alecthomas/template/testdata/tmpl2.tmpl b/Godeps/_workspace/src/github.com/alecthomas/template/testdata/tmpl2.tmpl deleted file mode 100644 index 16beba6e7..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/template/testdata/tmpl2.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -template2 -{{define "y"}}y{{end}} -{{template "x"}} diff --git a/Godeps/_workspace/src/github.com/alecthomas/units/COPYING b/Godeps/_workspace/src/github.com/alecthomas/units/COPYING deleted file mode 100644 index 2993ec085..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/units/COPYING +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (C) 2014 Alec Thomas - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/alecthomas/units/README.md b/Godeps/_workspace/src/github.com/alecthomas/units/README.md deleted file mode 100644 index bee884e3c..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/units/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Units - Helpful unit multipliers and functions for Go - -The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package. 
- -It allows for code like this: - -```go -n, err := ParseBase2Bytes("1KB") -// n == 1024 -n = units.Mebibyte * 512 -``` diff --git a/Godeps/_workspace/src/github.com/alecthomas/units/bytes.go b/Godeps/_workspace/src/github.com/alecthomas/units/bytes.go deleted file mode 100644 index eaadeb800..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/units/bytes.go +++ /dev/null @@ -1,83 +0,0 @@ -package units - -// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte, -// etc.). -type Base2Bytes int64 - -// Base-2 byte units. -const ( - Kibibyte Base2Bytes = 1024 - KiB = Kibibyte - Mebibyte = Kibibyte * 1024 - MiB = Mebibyte - Gibibyte = Mebibyte * 1024 - GiB = Gibibyte - Tebibyte = Gibibyte * 1024 - TiB = Tebibyte - Pebibyte = Tebibyte * 1024 - PiB = Pebibyte - Exbibyte = Pebibyte * 1024 - EiB = Exbibyte -) - -var ( - bytesUnitMap = MakeUnitMap("iB", "B", 1024) - oldBytesUnitMap = MakeUnitMap("B", "B", 1024) -) - -// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB -// and KiB are both 1024. -func ParseBase2Bytes(s string) (Base2Bytes, error) { - n, err := ParseUnit(s, bytesUnitMap) - if err != nil { - n, err = ParseUnit(s, oldBytesUnitMap) - } - return Base2Bytes(n), err -} - -func (b Base2Bytes) String() string { - return ToString(int64(b), 1024, "iB", "B") -} - -var ( - metricBytesUnitMap = MakeUnitMap("B", "B", 1000) -) - -// MetricBytes are SI byte units (1000 bytes in a kilobyte). -type MetricBytes SI - -// SI base-10 byte units. -const ( - Kilobyte MetricBytes = 1000 - KB = Kilobyte - Megabyte = Kilobyte * 1000 - MB = Megabyte - Gigabyte = Megabyte * 1000 - GB = Gigabyte - Terabyte = Gigabyte * 1000 - TB = Terabyte - Petabyte = Terabyte * 1000 - PB = Petabyte - Exabyte = Petabyte * 1000 - EB = Exabyte -) - -// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes. -func ParseMetricBytes(s string) (MetricBytes, error) { - n, err := ParseUnit(s, metricBytesUnitMap) - return MetricBytes(n), err -} - -func (m MetricBytes) String() string { - return ToString(int64(m), 1000, "B", "B") -} - -// ParseStrictBytes supports both iB and B suffixes for base 2 and metric, -// respectively. That is, KiB represents 1024 and KB represents 1000. 
-func ParseStrictBytes(s string) (int64, error) { - n, err := ParseUnit(s, bytesUnitMap) - if err != nil { - n, err = ParseUnit(s, metricBytesUnitMap) - } - return int64(n), err -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/units/bytes_test.go b/Godeps/_workspace/src/github.com/alecthomas/units/bytes_test.go deleted file mode 100644 index d4317aa58..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/units/bytes_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package units - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestBase2BytesString(t *testing.T) { - assert.Equal(t, Base2Bytes(0).String(), "0B") - assert.Equal(t, Base2Bytes(1025).String(), "1KiB1B") - assert.Equal(t, Base2Bytes(1048577).String(), "1MiB1B") -} - -func TestParseBase2Bytes(t *testing.T) { - n, err := ParseBase2Bytes("0B") - assert.NoError(t, err) - assert.Equal(t, 0, n) - n, err = ParseBase2Bytes("1KB") - assert.NoError(t, err) - assert.Equal(t, 1024, n) - n, err = ParseBase2Bytes("1MB1KB25B") - assert.NoError(t, err) - assert.Equal(t, 1049625, n) - n, err = ParseBase2Bytes("1.5MB") - assert.NoError(t, err) - assert.Equal(t, 1572864, n) -} - -func TestMetricBytesString(t *testing.T) { - assert.Equal(t, MetricBytes(0).String(), "0B") - assert.Equal(t, MetricBytes(1001).String(), "1KB1B") - assert.Equal(t, MetricBytes(1001025).String(), "1MB1KB25B") -} - -func TestParseMetricBytes(t *testing.T) { - n, err := ParseMetricBytes("0B") - assert.NoError(t, err) - assert.Equal(t, 0, n) - n, err = ParseMetricBytes("1KB1B") - assert.NoError(t, err) - assert.Equal(t, 1001, n) - n, err = ParseMetricBytes("1MB1KB25B") - assert.NoError(t, err) - assert.Equal(t, 1001025, n) - n, err = ParseMetricBytes("1.5MB") - assert.NoError(t, err) - assert.Equal(t, 1500000, n) -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/units/doc.go b/Godeps/_workspace/src/github.com/alecthomas/units/doc.go deleted file mode 100644 index 156ae3867..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/units/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package units provides helpful unit multipliers and functions for Go. -// -// The goal of this package is to have functionality similar to the time [1] package. -// -// -// [1] http://golang.org/pkg/time/ -// -// It allows for code like this: -// -// n, err := ParseBase2Bytes("1KB") -// // n == 1024 -// n = units.Mebibyte * 512 -package units diff --git a/Godeps/_workspace/src/github.com/alecthomas/units/si.go b/Godeps/_workspace/src/github.com/alecthomas/units/si.go deleted file mode 100644 index 8234a9d52..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/units/si.go +++ /dev/null @@ -1,26 +0,0 @@ -package units - -// SI units. -type SI int64 - -// SI unit multiples. 
-const ( - Kilo SI = 1000 - Mega = Kilo * 1000 - Giga = Mega * 1000 - Tera = Giga * 1000 - Peta = Tera * 1000 - Exa = Peta * 1000 -) - -func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 { - return map[string]float64{ - shortSuffix: 1, - "K" + suffix: float64(scale), - "M" + suffix: float64(scale * scale), - "G" + suffix: float64(scale * scale * scale), - "T" + suffix: float64(scale * scale * scale * scale), - "P" + suffix: float64(scale * scale * scale * scale * scale), - "E" + suffix: float64(scale * scale * scale * scale * scale * scale), - } -} diff --git a/Godeps/_workspace/src/github.com/alecthomas/units/util.go b/Godeps/_workspace/src/github.com/alecthomas/units/util.go deleted file mode 100644 index 6527e92d1..000000000 --- a/Godeps/_workspace/src/github.com/alecthomas/units/util.go +++ /dev/null @@ -1,138 +0,0 @@ -package units - -import ( - "errors" - "fmt" - "strings" -) - -var ( - siUnits = []string{"", "K", "M", "G", "T", "P", "E"} -) - -func ToString(n int64, scale int64, suffix, baseSuffix string) string { - mn := len(siUnits) - out := make([]string, mn) - for i, m := range siUnits { - if n%scale != 0 || i == 0 && n == 0 { - s := suffix - if i == 0 { - s = baseSuffix - } - out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s) - } - n /= scale - if n == 0 { - break - } - } - return strings.Join(out, "") -} - -// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123 -var errLeadingInt = errors.New("units: bad [0-9]*") // never printed - -// leadingInt consumes the leading [0-9]* from s. -func leadingInt(s string) (x int64, rem string, err error) { - i := 0 - for ; i < len(s); i++ { - c := s[i] - if c < '0' || c > '9' { - break - } - if x >= (1<<63-10)/10 { - // overflow - return 0, "", errLeadingInt - } - x = x*10 + int64(c) - '0' - } - return x, s[i:], nil -} - -func ParseUnit(s string, unitMap map[string]float64) (int64, error) { - // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ - orig := s - f := float64(0) - neg := false - - // Consume [-+]? - if s != "" { - c := s[0] - if c == '-' || c == '+' { - neg = c == '-' - s = s[1:] - } - } - // Special case: if all that is left is "0", this is zero. - if s == "0" { - return 0, nil - } - if s == "" { - return 0, errors.New("units: invalid " + orig) - } - for s != "" { - g := float64(0) // this element of the sequence - - var x int64 - var err error - - // The next character must be [0-9.] - if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) { - return 0, errors.New("units: invalid " + orig) - } - // Consume [0-9]* - pl := len(s) - x, s, err = leadingInt(s) - if err != nil { - return 0, errors.New("units: invalid " + orig) - } - g = float64(x) - pre := pl != len(s) // whether we consumed anything before a period - - // Consume (\.[0-9]*)? - post := false - if s != "" && s[0] == '.' { - s = s[1:] - pl := len(s) - x, s, err = leadingInt(s) - if err != nil { - return 0, errors.New("units: invalid " + orig) - } - scale := 1.0 - for n := pl - len(s); n > 0; n-- { - scale *= 10 - } - g += float64(x) / scale - post = pl != len(s) - } - if !pre && !post { - // no digits (e.g. ".s" or "-.s") - return 0, errors.New("units: invalid " + orig) - } - - // Consume unit. - i := 0 - for ; i < len(s); i++ { - c := s[i] - if c == '.' 
|| ('0' <= c && c <= '9') { - break - } - } - u := s[:i] - s = s[i:] - unit, ok := unitMap[u] - if !ok { - return 0, errors.New("units: unknown unit " + u + " in " + orig) - } - - f += g * unit - } - - if neg { - f = -f - } - if f < float64(-1<<63) || f > float64(1<<63-1) { - return 0, errors.New("units: overflow parsing unit") - } - return int64(f), nil -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore b/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml b/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml deleted file mode 100644 index ce9cb6233..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/.travis.yml +++ /dev/null @@ -1,2 +0,0 @@ -language: go -go: 1.3.3 diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE b/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE deleted file mode 100644 index 89b817996..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cenk Altı - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md b/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md deleted file mode 100644 index 020b8fbf3..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/README.md +++ /dev/null @@ -1,116 +0,0 @@ -# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] - -This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. - -[Exponential backoff][exponential backoff wiki] -is an algorithm that uses feedback to multiplicatively decrease the rate of some process, -in order to gradually find an acceptable rate. -The retries exponentially increase and stop increasing when a certain threshold is met. - -## How To - -We define two functions, `Retry()` and `RetryNotify()`. 
-They receive an `Operation` to execute, a `BackOff` algorithm, -and an optional `Notify` error handler. - -The operation will be executed, and will be retried on failure with delay -as given by the backoff algorithm. The backoff algorithm can also decide when to stop -retrying. -In addition, the notify error handler will be called after each failed attempt, -except for the last time, whose error should be handled by the caller. - -```go -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -func Retry(Operation, BackOff) error -func RetryNotify(Operation, BackOff, Notify) -``` - -## Examples - -See more advanced examples in the [godoc][advanced example]. - -### Retry - -Simple retry helper that uses the default exponential backoff algorithm: - -```go -operation := func() error { - // An operation that might fail. - return nil // or return errors.New("some error") -} - -err := Retry(operation, NewExponentialBackOff()) -if err != nil { - // Handle error. - return err -} - -// Operation is successful. -return nil -``` - -### Ticker - -```go -operation := func() error { - // An operation that might fail - return nil // or return errors.New("some error") -} - -b := NewExponentialBackOff() -ticker := NewTicker(b) - -var err error - -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. -for range ticker.C { - if err = operation(); err != nil { - log.Println(err, "will retry...") - continue - } - - ticker.Stop() - break -} - -if err != nil { - // Operation has failed. - return err -} - -// Operation is successful. -return nil -``` - -## Getting Started - -```bash -# install -$ go get github.com/cenkalti/backoff - -# test -$ cd $GOPATH/src/github.com/cenkalti/backoff -$ go get -t ./... -$ go test -v -cover -``` - -[godoc]: https://godoc.org/github.com/cenkalti/backoff -[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png -[travis]: https://travis-ci.org/cenkalti/backoff -[travis image]: https://travis-ci.org/cenkalti/backoff.png - -[google-http-java-client]: https://github.com/google/google-http-java-client -[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff - -[advanced example]: https://godoc.org/github.com/cenkalti/backoff#example_ diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go deleted file mode 100644 index 3fe6783b8..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/adv_example_test.go +++ /dev/null @@ -1,117 +0,0 @@ -package backoff - -import ( - "io/ioutil" - "log" - "net/http" - "time" -) - -// This is an example that demonstrates how this package could be used -// to perform various advanced operations. -// -// It executes an HTTP GET request with exponential backoff, -// while errors are logged and failed responses are closed, as required by net/http package. -// -// Note we define a condition function which is used inside the operation to -// determine whether the operation succeeded or failed. 
-func Example() error { - res, err := GetWithRetry( - "http://localhost:9999", - ErrorIfStatusCodeIsNot(http.StatusOK), - NewExponentialBackOff()) - - if err != nil { - // Close response body of last (failed) attempt. - // The Last attempt isn't handled by the notify-on-error function, - // which closes the body of all the previous attempts. - if e := res.Body.Close(); e != nil { - log.Printf("error closing last attempt's response body: %s", e) - } - log.Printf("too many failed request attempts: %s", err) - return err - } - defer res.Body.Close() // The response's Body must be closed. - - // Read body - _, _ = ioutil.ReadAll(res.Body) - - // Do more stuff - return nil -} - -// GetWithRetry is a helper function that performs an HTTP GET request -// to the given URL, and retries with the given backoff using the given condition function. -// -// It also uses a notify-on-error function which logs -// and closes the response body of the failed request. -func GetWithRetry(url string, condition Condition, bck BackOff) (*http.Response, error) { - var res *http.Response - err := RetryNotify( - func() error { - var err error - res, err = http.Get(url) - if err != nil { - return err - } - return condition(res) - }, - bck, - LogAndClose()) - - return res, err -} - -// Condition is a retry condition function. -// It receives a response, and returns an error -// if the response failed the condition. -type Condition func(*http.Response) error - -// ErrorIfStatusCodeIsNot returns a retry condition function. -// The condition returns an error -// if the given response's status code is not the given HTTP status code. -func ErrorIfStatusCodeIsNot(status int) Condition { - return func(res *http.Response) error { - if res.StatusCode != status { - return NewError(res) - } - return nil - } -} - -// Error is returned on ErrorIfX() condition functions throughout this package. -type Error struct { - Response *http.Response -} - -func NewError(res *http.Response) *Error { - // Sanity check - if res == nil { - panic("response object is nil") - } - return &Error{Response: res} -} -func (err *Error) Error() string { return "request failed" } - -// LogAndClose is a notify-on-error function. -// It logs the error and closes the response body. -func LogAndClose() Notify { - return func(err error, wait time.Duration) { - switch e := err.(type) { - case *Error: - defer e.Response.Body.Close() - - b, err := ioutil.ReadAll(e.Response.Body) - var body string - if err != nil { - body = "can't read body" - } else { - body = string(b) - } - - log.Printf("%s: %s", e.Response.Status, body) - default: - log.Println(err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go deleted file mode 100644 index 61bd6df66..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff.go +++ /dev/null @@ -1,59 +0,0 @@ -// Package backoff implements backoff algorithms for retrying operations. -// -// Also has a Retry() helper for retrying operations that may fail. -package backoff - -import "time" - -// BackOff is a backoff policy for retrying an operation. -type BackOff interface { - // NextBackOff returns the duration to wait before retrying the operation, - // or backoff.Stop to indicate that no more retries should be made. - // - // Example usage: - // - // duration := backoff.NextBackOff(); - // if (duration == backoff.Stop) { - // // Do not retry operation. - // } else { - // // Sleep for duration and retry operation. 
- // } - // - NextBackOff() time.Duration - - // Reset to initial state. - Reset() -} - -// Indicates that no more retries should be made for use in NextBackOff(). -const Stop time.Duration = -1 - -// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, -// meaning that the operation is retried immediately without waiting, indefinitely. -type ZeroBackOff struct{} - -func (b *ZeroBackOff) Reset() {} - -func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } - -// StopBackOff is a fixed backoff policy that always returns backoff.Stop for -// NextBackOff(), meaning that the operation should never be retried. -type StopBackOff struct{} - -func (b *StopBackOff) Reset() {} - -func (b *StopBackOff) NextBackOff() time.Duration { return Stop } - -// ConstantBackOff is a backoff policy that always returns the same backoff delay. -// This is in contrast to an exponential backoff policy, -// which returns a delay that grows longer as you call NextBackOff() over and over again. -type ConstantBackOff struct { - Interval time.Duration -} - -func (b *ConstantBackOff) Reset() {} -func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } - -func NewConstantBackOff(d time.Duration) *ConstantBackOff { - return &ConstantBackOff{Interval: d} -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go deleted file mode 100644 index 91f27c4f1..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/backoff_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package backoff - -import ( - "testing" - "time" -) - -func TestNextBackOffMillis(t *testing.T) { - subtestNextBackOff(t, 0, new(ZeroBackOff)) - subtestNextBackOff(t, Stop, new(StopBackOff)) -} - -func subtestNextBackOff(t *testing.T, expectedValue time.Duration, backOffPolicy BackOff) { - for i := 0; i < 10; i++ { - next := backOffPolicy.NextBackOff() - if next != expectedValue { - t.Errorf("got: %d expected: %d", next, expectedValue) - } - } -} - -func TestConstantBackOff(t *testing.T) { - backoff := NewConstantBackOff(time.Second) - if backoff.NextBackOff() != time.Second { - t.Error("invalid interval") - } -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go deleted file mode 100644 index 0d1852e45..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/example_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package backoff - -import "log" - -func ExampleRetry() error { - operation := func() error { - // An operation that might fail. - return nil // or return errors.New("some error") - } - - err := Retry(operation, NewExponentialBackOff()) - if err != nil { - // Handle error. - return err - } - - // Operation is successful. - return nil -} - -func ExampleTicker() error { - operation := func() error { - // An operation that might fail - return nil // or return errors.New("some error") - } - - b := NewExponentialBackOff() - ticker := NewTicker(b) - - var err error - - // Ticks will continue to arrive when the previous operation is still running, - // so operations that take a while to fail could run in quick succession. - for _ = range ticker.C { - if err = operation(); err != nil { - log.Println(err, "will retry...") - continue - } - - ticker.Stop() - break - } - - if err != nil { - // Operation has failed. - return err - } - - // Operation is successful. 
- return nil -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go deleted file mode 100644 index cc2a164f2..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential.go +++ /dev/null @@ -1,151 +0,0 @@ -package backoff - -import ( - "math/rand" - "time" -) - -/* -ExponentialBackOff is a backoff implementation that increases the backoff -period for each retry attempt using a randomization function that grows exponentially. - -NextBackOff() is calculated using the following formula: - - randomized interval = - RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) - -In other words NextBackOff() will range between the randomization factor -percentage below and above the retry interval. - -For example, given the following parameters: - - RetryInterval = 2 - RandomizationFactor = 0.5 - Multiplier = 2 - -the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, -multiplied by the exponential, that is, between 2 and 6 seconds. - -Note: MaxInterval caps the RetryInterval and not the randomized interval. - -If the time elapsed since an ExponentialBackOff instance is created goes past the -MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. - -The elapsed time can be reset by calling Reset(). - -Example: Given the following default arguments, for 10 tries the sequence will be, -and assuming we go over the MaxElapsedTime on the 10th try: - - Request # RetryInterval (seconds) Randomized Interval (seconds) - - 1 0.5 [0.25, 0.75] - 2 0.75 [0.375, 1.125] - 3 1.125 [0.562, 1.687] - 4 1.687 [0.8435, 2.53] - 5 2.53 [1.265, 3.795] - 6 3.795 [1.897, 5.692] - 7 5.692 [2.846, 8.538] - 8 8.538 [4.269, 12.807] - 9 12.807 [6.403, 19.210] - 10 19.210 backoff.Stop - -Note: Implementation is not thread-safe. -*/ -type ExponentialBackOff struct { - InitialInterval time.Duration - RandomizationFactor float64 - Multiplier float64 - MaxInterval time.Duration - // After MaxElapsedTime the ExponentialBackOff stops. - // It never stops if MaxElapsedTime == 0. - MaxElapsedTime time.Duration - Clock Clock - - currentInterval time.Duration - startTime time.Time -} - -// Clock is an interface that returns current time for BackOff. -type Clock interface { - Now() time.Time -} - -// Default values for ExponentialBackOff. -const ( - DefaultInitialInterval = 500 * time.Millisecond - DefaultRandomizationFactor = 0.5 - DefaultMultiplier = 1.5 - DefaultMaxInterval = 60 * time.Second - DefaultMaxElapsedTime = 15 * time.Minute -) - -// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. -func NewExponentialBackOff() *ExponentialBackOff { - b := &ExponentialBackOff{ - InitialInterval: DefaultInitialInterval, - RandomizationFactor: DefaultRandomizationFactor, - Multiplier: DefaultMultiplier, - MaxInterval: DefaultMaxInterval, - MaxElapsedTime: DefaultMaxElapsedTime, - Clock: SystemClock, - } - b.Reset() - return b -} - -type systemClock struct{} - -func (t systemClock) Now() time.Time { - return time.Now() -} - -// SystemClock implements Clock interface that uses time.Now(). -var SystemClock = systemClock{} - -// Reset the interval back to the initial retry interval and restarts the timer. 
-func (b *ExponentialBackOff) Reset() { - b.currentInterval = b.InitialInterval - b.startTime = b.Clock.Now() -} - -// NextBackOff calculates the next backoff interval using the formula: -// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) -func (b *ExponentialBackOff) NextBackOff() time.Duration { - // Make sure we have not gone over the maximum elapsed time. - if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { - return Stop - } - defer b.incrementCurrentInterval() - return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) -} - -// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance -// is created and is reset when Reset() is called. -// -// The elapsed time is computed using time.Now().UnixNano(). -func (b *ExponentialBackOff) GetElapsedTime() time.Duration { - return b.Clock.Now().Sub(b.startTime) -} - -// Increments the current interval by multiplying it with the multiplier. -func (b *ExponentialBackOff) incrementCurrentInterval() { - // Check for overflow, if overflow is detected set the current interval to the max interval. - if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { - b.currentInterval = b.MaxInterval - } else { - b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) - } -} - -// Returns a random value from the following interval: -// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. -func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { - var delta = randomizationFactor * float64(currentInterval) - var minInterval = float64(currentInterval) - delta - var maxInterval = float64(currentInterval) + delta - - // Get a random value from the range [minInterval, maxInterval]. - // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then - // we want a 33% chance for selecting either 1, 2 or 3. - return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go deleted file mode 100644 index 11b95e4f6..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/exponential_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package backoff - -import ( - "math" - "testing" - "time" -) - -func TestBackOff(t *testing.T) { - var ( - testInitialInterval = 500 * time.Millisecond - testRandomizationFactor = 0.1 - testMultiplier = 2.0 - testMaxInterval = 5 * time.Second - testMaxElapsedTime = 15 * time.Minute - ) - - exp := NewExponentialBackOff() - exp.InitialInterval = testInitialInterval - exp.RandomizationFactor = testRandomizationFactor - exp.Multiplier = testMultiplier - exp.MaxInterval = testMaxInterval - exp.MaxElapsedTime = testMaxElapsedTime - exp.Reset() - - var expectedResults = []time.Duration{500, 1000, 2000, 4000, 5000, 5000, 5000, 5000, 5000, 5000} - for i, d := range expectedResults { - expectedResults[i] = d * time.Millisecond - } - - for _, expected := range expectedResults { - assertEquals(t, expected, exp.currentInterval) - // Assert that the next backoff falls in the expected range. 
- var minInterval = expected - time.Duration(testRandomizationFactor*float64(expected)) - var maxInterval = expected + time.Duration(testRandomizationFactor*float64(expected)) - var actualInterval = exp.NextBackOff() - if !(minInterval <= actualInterval && actualInterval <= maxInterval) { - t.Error("error") - } - } -} - -func TestGetRandomizedInterval(t *testing.T) { - // 33% chance of being 1. - assertEquals(t, 1, getRandomValueFromInterval(0.5, 0, 2)) - assertEquals(t, 1, getRandomValueFromInterval(0.5, 0.33, 2)) - // 33% chance of being 2. - assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.34, 2)) - assertEquals(t, 2, getRandomValueFromInterval(0.5, 0.66, 2)) - // 33% chance of being 3. - assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.67, 2)) - assertEquals(t, 3, getRandomValueFromInterval(0.5, 0.99, 2)) -} - -type TestClock struct { - i time.Duration - start time.Time -} - -func (c *TestClock) Now() time.Time { - t := c.start.Add(c.i) - c.i += time.Second - return t -} - -func TestGetElapsedTime(t *testing.T) { - var exp = NewExponentialBackOff() - exp.Clock = &TestClock{} - exp.Reset() - - var elapsedTime = exp.GetElapsedTime() - if elapsedTime != time.Second { - t.Errorf("elapsedTime=%d", elapsedTime) - } -} - -func TestMaxElapsedTime(t *testing.T) { - var exp = NewExponentialBackOff() - exp.Clock = &TestClock{start: time.Time{}.Add(10000 * time.Second)} - // Change the currentElapsedTime to be 0 ensuring that the elapsed time will be greater - // than the max elapsed time. - exp.startTime = time.Time{} - assertEquals(t, Stop, exp.NextBackOff()) -} - -func TestBackOffOverflow(t *testing.T) { - var ( - testInitialInterval time.Duration = math.MaxInt64 / 2 - testMaxInterval time.Duration = math.MaxInt64 - testMultiplier = 2.1 - ) - - exp := NewExponentialBackOff() - exp.InitialInterval = testInitialInterval - exp.Multiplier = testMultiplier - exp.MaxInterval = testMaxInterval - exp.Reset() - - exp.NextBackOff() - // Assert that when an overflow is possible the current varerval time.Duration is set to the max varerval time.Duration . - assertEquals(t, testMaxInterval, exp.currentInterval) -} - -func assertEquals(t *testing.T, expected, value time.Duration) { - if expected != value { - t.Errorf("got: %d, expected: %d", value, expected) - } -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go deleted file mode 100644 index f01f2bbd0..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry.go +++ /dev/null @@ -1,46 +0,0 @@ -package backoff - -import "time" - -// An Operation is executing by Retry() or RetryNotify(). -// The operation will be retried using a backoff policy if it returns an error. -type Operation func() error - -// Notify is a notify-on-error function. It receives an operation error and -// backoff delay if the operation failed (with an error). -// -// NOTE that if the backoff policy stated to stop retrying, -// the notify function isn't called. -type Notify func(error, time.Duration) - -// Retry the function f until it does not return error or BackOff stops. -// f is guaranteed to be run at least once. -// It is the caller's responsibility to reset b after Retry returns. -// -// Retry sleeps the goroutine for the duration returned by BackOff after a -// failed operation returns. -func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } - -// RetryNotify calls notify function with the error and wait duration -// for each failed attempt before sleep. 
-func RetryNotify(operation Operation, b BackOff, notify Notify) error { - var err error - var next time.Duration - - b.Reset() - for { - if err = operation(); err == nil { - return nil - } - - if next = b.NextBackOff(); next == Stop { - return err - } - - if notify != nil { - notify(err, next) - } - - time.Sleep(next) - } -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go deleted file mode 100644 index c0d25ab76..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/retry_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package backoff - -import ( - "errors" - "log" - "testing" -) - -func TestRetry(t *testing.T) { - const successOn = 3 - var i = 0 - - // This function is successfull on "successOn" calls. - f := func() error { - i++ - log.Printf("function is called %d. time\n", i) - - if i == successOn { - log.Println("OK") - return nil - } - - log.Println("error") - return errors.New("error") - } - - err := Retry(f, NewExponentialBackOff()) - if err != nil { - t.Errorf("unexpected error: %s", err.Error()) - } - if i != successOn { - t.Errorf("invalid number of retries: %d", i) - } -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go deleted file mode 100644 index 7a5ff4ed1..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker.go +++ /dev/null @@ -1,79 +0,0 @@ -package backoff - -import ( - "runtime" - "sync" - "time" -) - -// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. -// -// Ticks will continue to arrive when the previous operation is still running, -// so operations that take a while to fail could run in quick succession. -type Ticker struct { - C <-chan time.Time - c chan time.Time - b BackOff - stop chan struct{} - stopOnce sync.Once -} - -// NewTicker returns a new Ticker containing a channel that will send the time at times -// specified by the BackOff argument. Ticker is guaranteed to tick at least once. -// The channel is closed when Stop method is called or BackOff stops. -func NewTicker(b BackOff) *Ticker { - c := make(chan time.Time) - t := &Ticker{ - C: c, - c: c, - b: b, - stop: make(chan struct{}), - } - go t.run() - runtime.SetFinalizer(t, (*Ticker).Stop) - return t -} - -// Stop turns off a ticker. After Stop, no more ticks will be sent. -func (t *Ticker) Stop() { - t.stopOnce.Do(func() { close(t.stop) }) -} - -func (t *Ticker) run() { - c := t.c - defer close(c) - t.b.Reset() - - // Ticker is guaranteed to tick at least once. - afterC := t.send(time.Now()) - - for { - if afterC == nil { - return - } - - select { - case tick := <-afterC: - afterC = t.send(tick) - case <-t.stop: - t.c = nil // Prevent future ticks from being sent to the channel. 
- return - } - } -} - -func (t *Ticker) send(tick time.Time) <-chan time.Time { - select { - case t.c <- tick: - case <-t.stop: - return nil - } - - next := t.b.NextBackOff() - if next == Stop { - t.Stop() - return nil - } - - return time.After(next) -} diff --git a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go b/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go deleted file mode 100644 index 7c392df46..000000000 --- a/Godeps/_workspace/src/github.com/cenkalti/backoff/ticker_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package backoff - -import ( - "errors" - "log" - "testing" -) - -func TestTicker(t *testing.T) { - const successOn = 3 - var i = 0 - - // This function is successfull on "successOn" calls. - f := func() error { - i++ - log.Printf("function is called %d. time\n", i) - - if i == successOn { - log.Println("OK") - return nil - } - - log.Println("error") - return errors.New("error") - } - - b := NewExponentialBackOff() - ticker := NewTicker(b) - - var err error - for _ = range ticker.C { - if err = f(); err != nil { - t.Log(err) - continue - } - - break - } - if err != nil { - t.Errorf("unexpected error: %s", err.Error()) - } - if i != successOn { - t.Errorf("invalid number of retries: %d", i) - } -} diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/.travis.yml b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/.travis.yml deleted file mode 100644 index 46cc6e78c..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.3.3 -notifications: - # See http://about.travis-ci.org/docs/user/build-configuration/ to learn more - # about configuring notification recipients and more. - email: - recipients: - - coda.hale@gmail.com diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/LICENSE b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/LICENSE deleted file mode 100644 index f9835c241..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Coda Hale - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
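The files removed next document the vendored codahale/hdrhistogram package. For orientation, here is a minimal usage sketch based only on the `New`, `RecordValue`, `ValueAtQuantile`, and `Max` functions visible in the deleted hdr.go below; the range, precision, and sample latencies are illustrative assumptions, not values taken from this repository.

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Track values from 1 to 1,000,000 (e.g. latencies in microseconds)
	// with 3 significant figures of precision.
	h := hdrhistogram.New(1, 1000000, 3)

	// Record some observed values; RecordValue returns an error if a
	// value falls outside the trackable range.
	for _, v := range []int64{100, 250, 250, 900, 15000} {
		if err := h.RecordValue(v); err != nil {
			panic(err)
		}
	}

	// Query the recorded distribution.
	fmt.Println("p50:", h.ValueAtQuantile(50))
	fmt.Println("p99:", h.ValueAtQuantile(99))
	fmt.Println("max:", h.Max())
}
```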
diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/README.md b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/README.md deleted file mode 100644 index 614b197c3..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/README.md +++ /dev/null @@ -1,15 +0,0 @@ -hdrhistogram -============ - -[![Build Status](https://travis-ci.org/codahale/hdrhistogram.png?branch=master)](https://travis-ci.org/codahale/hdrhistogram) - -A pure Go implementation of the [HDR Histogram](https://github.com/HdrHistogram/HdrHistogram). - -> A Histogram that supports recording and analyzing sampled data value counts -> across a configurable integer value range with configurable value precision -> within the range. Value precision is expressed as the number of significant -> digits in the value recording, and provides control over value quantization -> behavior across the value range and the subsequent value resolution at any -> given level. - -For documentation, check [godoc](http://godoc.org/github.com/codahale/hdrhistogram). diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr.go b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr.go deleted file mode 100644 index 841544c77..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr.go +++ /dev/null @@ -1,520 +0,0 @@ -// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram -// data structure. The HDR Histogram allows for fast and accurate analysis of -// the extreme ranges of data with non-normal distributions, like latency. -package hdrhistogram - -import ( - "fmt" - "math" -) - -// A Bracket is a part of a cumulative distribution. -type Bracket struct { - Quantile float64 - Count, ValueAt int64 -} - -// A Snapshot is an exported view of a Histogram, useful for serializing them. -// A Histogram can be constructed from it by passing it to Import. -type Snapshot struct { - LowestTrackableValue int64 - HighestTrackableValue int64 - SignificantFigures int64 - Counts []int64 -} - -// A Histogram is a lossy data structure used to record the distribution of -// non-normally distributed data (like latency) with a high degree of accuracy -// and a bounded degree of precision. -type Histogram struct { - lowestTrackableValue int64 - highestTrackableValue int64 - unitMagnitude int64 - significantFigures int64 - subBucketHalfCountMagnitude int32 - subBucketHalfCount int32 - subBucketMask int64 - subBucketCount int32 - bucketCount int32 - countsLen int32 - totalCount int64 - counts []int64 -} - -// New returns a new Histogram instance capable of tracking values in the given -// range and with the given amount of precision. 
-func New(minValue, maxValue int64, sigfigs int) *Histogram { - if sigfigs < 1 || 5 < sigfigs { - panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs)) - } - - largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs) - subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution)))) - - subBucketHalfCountMagnitude := subBucketCountMagnitude - if subBucketHalfCountMagnitude < 1 { - subBucketHalfCountMagnitude = 1 - } - subBucketHalfCountMagnitude-- - - unitMagnitude := int32(math.Floor(math.Log2(float64(minValue)))) - if unitMagnitude < 0 { - unitMagnitude = 0 - } - - subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1)) - - subBucketHalfCount := subBucketCount / 2 - subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude) - - // determine exponent range needed to support the trackable value with no - // overflow: - smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude) - bucketsNeeded := int32(1) - for smallestUntrackableValue < maxValue { - smallestUntrackableValue <<= 1 - bucketsNeeded++ - } - - bucketCount := bucketsNeeded - countsLen := (bucketCount + 1) * (subBucketCount / 2) - - return &Histogram{ - lowestTrackableValue: minValue, - highestTrackableValue: maxValue, - unitMagnitude: int64(unitMagnitude), - significantFigures: int64(sigfigs), - subBucketHalfCountMagnitude: subBucketHalfCountMagnitude, - subBucketHalfCount: subBucketHalfCount, - subBucketMask: subBucketMask, - subBucketCount: subBucketCount, - bucketCount: bucketCount, - countsLen: countsLen, - totalCount: 0, - counts: make([]int64, countsLen), - } -} - -// ByteSize returns an estimate of the amount of memory allocated to the -// histogram in bytes. -// -// N.B.: This does not take into account the overhead for slices, which are -// small, constant, and specific to the compiler version. -func (h *Histogram) ByteSize() int { - return 6*8 + 5*4 + len(h.counts)*8 -} - -// Merge merges the data stored in the given histogram with the receiver, -// returning the number of recorded values which had to be dropped. -func (h *Histogram) Merge(from *Histogram) (dropped int64) { - i := from.rIterator() - for i.next() { - v := i.valueFromIdx - c := i.countAtIdx - - if h.RecordValues(v, c) != nil { - dropped += c - } - } - - return -} - -// TotalCount returns total number of values recorded. -func (h *Histogram) TotalCount() int64 { - return h.totalCount -} - -// Max returns the approximate maximum recorded value. -func (h *Histogram) Max() int64 { - var max int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - max = i.highestEquivalentValue - } - } - return h.lowestEquivalentValue(max) -} - -// Min returns the approximate minimum recorded value. -func (h *Histogram) Min() int64 { - var min int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 && min == 0 { - min = i.highestEquivalentValue - break - } - } - return h.lowestEquivalentValue(min) -} - -// Mean returns the approximate arithmetic mean of the recorded values. -func (h *Histogram) Mean() float64 { - if h.totalCount == 0 { - return 0 - } - var total int64 - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx) - } - } - return float64(total) / float64(h.totalCount) -} - -// StdDev returns the approximate standard deviation of the recorded values. 
-func (h *Histogram) StdDev() float64 { - if h.totalCount == 0 { - return 0 - } - - mean := h.Mean() - geometricDevTotal := 0.0 - - i := h.iterator() - for i.next() { - if i.countAtIdx != 0 { - dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean - geometricDevTotal += (dev * dev) * float64(i.countAtIdx) - } - } - - return math.Sqrt(geometricDevTotal / float64(h.totalCount)) -} - -// Reset deletes all recorded values and restores the histogram to its original -// state. -func (h *Histogram) Reset() { - h.totalCount = 0 - for i := range h.counts { - h.counts[i] = 0 - } -} - -// RecordValue records the given value, returning an error if the value is out -// of range. -func (h *Histogram) RecordValue(v int64) error { - return h.RecordValues(v, 1) -} - -// RecordCorrectedValue records the given value, correcting for stalls in the -// recording process. This only works for processes which are recording values -// at an expected interval (e.g., doing jitter analysis). Processes which are -// recording ad-hoc values (e.g., latency for incoming requests) can't take -// advantage of this. -func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error { - if err := h.RecordValue(v); err != nil { - return err - } - - if expectedInterval <= 0 || v <= expectedInterval { - return nil - } - - missingValue := v - expectedInterval - for missingValue >= expectedInterval { - if err := h.RecordValue(missingValue); err != nil { - return err - } - missingValue -= expectedInterval - } - - return nil -} - -// RecordValues records n occurrences of the given value, returning an error if -// the value is out of range. -func (h *Histogram) RecordValues(v, n int64) error { - idx := h.countsIndexFor(v) - if idx < 0 || int(h.countsLen) <= idx { - return fmt.Errorf("value %d is too large to be recorded", v) - } - h.counts[idx] += n - h.totalCount += n - - return nil -} - -// ValueAtQuantile returns the recorded value at the given quantile (0..100). -func (h *Histogram) ValueAtQuantile(q float64) int64 { - if q > 100 { - q = 100 - } - - total := int64(0) - countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5) - - i := h.iterator() - for i.next() { - total += i.countAtIdx - if total >= countAtPercentile { - return h.highestEquivalentValue(i.valueFromIdx) - } - } - - return 0 -} - -// CumulativeDistribution returns an ordered list of brackets of the -// distribution of recorded values. -func (h *Histogram) CumulativeDistribution() []Bracket { - var result []Bracket - - i := h.pIterator(1) - for i.next() { - result = append(result, Bracket{ - Quantile: i.percentile, - Count: i.countToIdx, - ValueAt: i.highestEquivalentValue, - }) - } - - return result -} - -// Equals returns true if the two Histograms are equivalent, false if not. 
-func (h *Histogram) Equals(other *Histogram) bool { - switch { - case - h.lowestTrackableValue != other.lowestTrackableValue, - h.highestTrackableValue != other.highestTrackableValue, - h.unitMagnitude != other.unitMagnitude, - h.significantFigures != other.significantFigures, - h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude, - h.subBucketHalfCount != other.subBucketHalfCount, - h.subBucketMask != other.subBucketMask, - h.subBucketCount != other.subBucketCount, - h.bucketCount != other.bucketCount, - h.countsLen != other.countsLen, - h.totalCount != other.totalCount: - return false - default: - for i, c := range h.counts { - if c != other.counts[i] { - return false - } - } - } - return true -} - -// Export returns a snapshot view of the Histogram. This can be later passed to -// Import to construct a new Histogram with the same state. -func (h *Histogram) Export() *Snapshot { - return &Snapshot{ - LowestTrackableValue: h.lowestTrackableValue, - HighestTrackableValue: h.highestTrackableValue, - SignificantFigures: h.significantFigures, - Counts: h.counts, - } -} - -// Import returns a new Histogram populated from the Snapshot data. -func Import(s *Snapshot) *Histogram { - h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures)) - h.counts = s.Counts - totalCount := int64(0) - for i := int32(0); i < h.countsLen; i++ { - countAtIndex := h.counts[i] - if countAtIndex > 0 { - totalCount += countAtIndex - } - } - h.totalCount = totalCount - return h -} - -func (h *Histogram) iterator() *iterator { - return &iterator{ - h: h, - subBucketIdx: -1, - } -} - -func (h *Histogram) rIterator() *rIterator { - return &rIterator{ - iterator: iterator{ - h: h, - subBucketIdx: -1, - }, - } -} - -func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator { - return &pIterator{ - iterator: iterator{ - h: h, - subBucketIdx: -1, - }, - ticksPerHalfDistance: ticksPerHalfDistance, - } -} - -func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - adjustedBucket := bucketIdx - if subBucketIdx >= h.subBucketCount { - adjustedBucket++ - } - return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket)) -} - -func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 { - return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude) -} - -func (h *Histogram) lowestEquivalentValue(v int64) int64 { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - return h.valueFromIndex(bucketIdx, subBucketIdx) -} - -func (h *Histogram) nextNonEquivalentValue(v int64) int64 { - return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v) -} - -func (h *Histogram) highestEquivalentValue(v int64) int64 { - return h.nextNonEquivalentValue(v) - 1 -} - -func (h *Histogram) medianEquivalentValue(v int64) int64 { - return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1) -} - -func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 { - return h.counts[h.countsIndex(bucketIdx, subBucketIdx)] -} - -func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 { - bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude) - offsetInBucket := subBucketIdx - h.subBucketHalfCount - return bucketBaseIdx + offsetInBucket -} - -func (h *Histogram) getBucketIndex(v int64) int32 { - pow2Ceiling := bitLen(v | h.subBucketMask) - return int32(pow2Ceiling - int64(h.unitMagnitude) - - 
int64(h.subBucketHalfCountMagnitude+1)) -} - -func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 { - return int32(v >> uint(int64(idx)+int64(h.unitMagnitude))) -} - -func (h *Histogram) countsIndexFor(v int64) int { - bucketIdx := h.getBucketIndex(v) - subBucketIdx := h.getSubBucketIdx(v, bucketIdx) - return int(h.countsIndex(bucketIdx, subBucketIdx)) -} - -type iterator struct { - h *Histogram - bucketIdx, subBucketIdx int32 - countAtIdx, countToIdx, valueFromIdx int64 - highestEquivalentValue int64 -} - -func (i *iterator) next() bool { - if i.countToIdx >= i.h.totalCount { - return false - } - - // increment bucket - i.subBucketIdx++ - if i.subBucketIdx >= i.h.subBucketCount { - i.subBucketIdx = i.h.subBucketHalfCount - i.bucketIdx++ - } - - if i.bucketIdx >= i.h.bucketCount { - return false - } - - i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx) - i.countToIdx += i.countAtIdx - i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx) - i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx) - - return true -} - -type rIterator struct { - iterator - countAddedThisStep int64 -} - -func (r *rIterator) next() bool { - for r.iterator.next() { - if r.countAtIdx != 0 { - r.countAddedThisStep = r.countAtIdx - return true - } - } - return false -} - -type pIterator struct { - iterator - seenLastValue bool - ticksPerHalfDistance int32 - percentileToIteratorTo float64 - percentile float64 -} - -func (p *pIterator) next() bool { - if !(p.countToIdx < p.h.totalCount) { - if p.seenLastValue { - return false - } - - p.seenLastValue = true - p.percentile = 100 - - return true - } - - if p.subBucketIdx == -1 && !p.iterator.next() { - return false - } - - var done = false - for !done { - currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount) - if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile { - p.percentile = p.percentileToIteratorTo - halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1)) - percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance - p.percentileToIteratorTo += 100.0 / percentileReportingTicks - return true - } - done = !p.iterator.next() - } - - return true -} - -func bitLen(x int64) (n int64) { - for ; x >= 0x8000; x >>= 16 { - n += 16 - } - if x >= 0x80 { - x >>= 8 - n += 8 - } - if x >= 0x8 { - x >>= 4 - n += 4 - } - if x >= 0x2 { - x >>= 2 - n += 2 - } - if x >= 0x1 { - n++ - } - return -} diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr_test.go b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr_test.go deleted file mode 100644 index b8cb22524..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/hdr_test.go +++ /dev/null @@ -1,344 +0,0 @@ -package hdrhistogram_test - -import ( - "math" - "reflect" - "testing" - - "github.com/codahale/hdrhistogram" -) - -func TestHighSigFig(t *testing.T) { - input := []int64{ - 459876, 669187, 711612, 816326, 931423, 1033197, 1131895, 2477317, - 3964974, 12718782, - } - - hist := hdrhistogram.New(459876, 12718782, 5) - for _, sample := range input { - hist.RecordValue(sample) - } - - if v, want := hist.ValueAtQuantile(50), int64(1048575); v != want { - t.Errorf("Median was %v, but expected %v", v, want) - } -} - -func TestValueAtQuantile(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - data := []struct { - q float64 - v 
int64 - }{ - {q: 50, v: 500223}, - {q: 75, v: 750079}, - {q: 90, v: 900095}, - {q: 95, v: 950271}, - {q: 99, v: 990207}, - {q: 99.9, v: 999423}, - {q: 99.99, v: 999935}, - } - - for _, d := range data { - if v := h.ValueAtQuantile(d.q); v != d.v { - t.Errorf("P%v was %v, but expected %v", d.q, v, d.v) - } - } -} - -func TestMean(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.Mean(), 500000.013312; v != want { - t.Errorf("Mean was %v, but expected %v", v, want) - } -} - -func TestStdDev(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.StdDev(), 288675.1403682715; v != want { - t.Errorf("StdDev was %v, but expected %v", v, want) - } -} - -func TestTotalCount(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - if v, want := h.TotalCount(), int64(i+1); v != want { - t.Errorf("TotalCount was %v, but expected %v", v, want) - } - } -} - -func TestMax(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.Max(), int64(999936); v != want { - t.Errorf("Max was %v, but expected %v", v, want) - } -} - -func TestReset(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - h.Reset() - - if v, want := h.Max(), int64(0); v != want { - t.Errorf("Max was %v, but expected %v", v, want) - } -} - -func TestMerge(t *testing.T) { - h1 := hdrhistogram.New(1, 1000, 3) - h2 := hdrhistogram.New(1, 1000, 3) - - for i := 0; i < 100; i++ { - if err := h1.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - for i := 100; i < 200; i++ { - if err := h2.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - h1.Merge(h2) - - if v, want := h1.ValueAtQuantile(50), int64(99); v != want { - t.Errorf("Median was %v, but expected %v", v, want) - } -} - -func TestMin(t *testing.T) { - h := hdrhistogram.New(1, 10000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if v, want := h.Min(), int64(0); v != want { - t.Errorf("Min was %v, but expected %v", v, want) - } -} - -func TestByteSize(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - - if v, want := h.ByteSize(), 65604; v != want { - t.Errorf("ByteSize was %v, but expected %d", v, want) - } -} - -func TestRecordCorrectedValue(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - - if err := h.RecordCorrectedValue(10, 100); err != nil { - t.Fatal(err) - } - - if v, want := h.ValueAtQuantile(75), int64(10); v != want { - t.Errorf("Corrected value was %v, but expected %v", v, want) - } -} - -func TestRecordCorrectedValueStall(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - - if err := h.RecordCorrectedValue(1000, 100); err != nil { - t.Fatal(err) - } - - if v, want := h.ValueAtQuantile(75), int64(800); v != want { - t.Errorf("Corrected value was %v, but expected %v", v, want) - } -} - -func TestCumulativeDistribution(t *testing.T) { - h := hdrhistogram.New(1, 100000000, 3) - - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - 
t.Fatal(err) - } - } - - actual := h.CumulativeDistribution() - expected := []hdrhistogram.Bracket{ - hdrhistogram.Bracket{Quantile: 0, Count: 1, ValueAt: 0}, - hdrhistogram.Bracket{Quantile: 50, Count: 500224, ValueAt: 500223}, - hdrhistogram.Bracket{Quantile: 75, Count: 750080, ValueAt: 750079}, - hdrhistogram.Bracket{Quantile: 87.5, Count: 875008, ValueAt: 875007}, - hdrhistogram.Bracket{Quantile: 93.75, Count: 937984, ValueAt: 937983}, - hdrhistogram.Bracket{Quantile: 96.875, Count: 969216, ValueAt: 969215}, - hdrhistogram.Bracket{Quantile: 98.4375, Count: 984576, ValueAt: 984575}, - hdrhistogram.Bracket{Quantile: 99.21875, Count: 992256, ValueAt: 992255}, - hdrhistogram.Bracket{Quantile: 99.609375, Count: 996352, ValueAt: 996351}, - hdrhistogram.Bracket{Quantile: 99.8046875, Count: 998400, ValueAt: 998399}, - hdrhistogram.Bracket{Quantile: 99.90234375, Count: 999424, ValueAt: 999423}, - hdrhistogram.Bracket{Quantile: 99.951171875, Count: 999936, ValueAt: 999935}, - hdrhistogram.Bracket{Quantile: 99.9755859375, Count: 999936, ValueAt: 999935}, - hdrhistogram.Bracket{Quantile: 99.98779296875, Count: 999936, ValueAt: 999935}, - hdrhistogram.Bracket{Quantile: 99.993896484375, Count: 1000000, ValueAt: 1000447}, - hdrhistogram.Bracket{Quantile: 100, Count: 1000000, ValueAt: 1000447}, - } - - if !reflect.DeepEqual(actual, expected) { - t.Errorf("CF was %#v, but expected %#v", actual, expected) - } -} - -func TestNaN(t *testing.T) { - h := hdrhistogram.New(1, 100000, 3) - if math.IsNaN(h.Mean()) { - t.Error("mean is NaN") - } - if math.IsNaN(h.StdDev()) { - t.Error("stddev is NaN") - } -} - -func BenchmarkHistogramRecordValue(b *testing.B) { - h := hdrhistogram.New(1, 10000000, 3) - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - b.Fatal(err) - } - } - b.ResetTimer() - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - h.RecordValue(100) - } -} - -func BenchmarkNew(b *testing.B) { - b.ReportAllocs() - - for i := 0; i < b.N; i++ { - hdrhistogram.New(1, 120000, 3) // this could track 1ms-2min - } -} - -func TestUnitMagnitudeOverflow(t *testing.T) { - h := hdrhistogram.New(0, 200, 4) - if err := h.RecordValue(11); err != nil { - t.Fatal(err) - } -} - -func TestSubBucketMaskOverflow(t *testing.T) { - hist := hdrhistogram.New(2e7, 1e8, 5) - for _, sample := range [...]int64{1e8, 2e7, 3e7} { - hist.RecordValue(sample) - } - - for q, want := range map[float64]int64{ - 50: 33554431, - 83.33: 33554431, - 83.34: 100663295, - 99: 100663295, - } { - if got := hist.ValueAtQuantile(q); got != want { - t.Errorf("got %d for %fth percentile. 
want: %d", got, q, want) - } - } -} - -func TestExportImport(t *testing.T) { - min := int64(1) - max := int64(10000000) - sigfigs := 3 - h := hdrhistogram.New(min, max, sigfigs) - for i := 0; i < 1000000; i++ { - if err := h.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - s := h.Export() - - if v := s.LowestTrackableValue; v != min { - t.Errorf("LowestTrackableValue was %v, but expected %v", v, min) - } - - if v := s.HighestTrackableValue; v != max { - t.Errorf("HighestTrackableValue was %v, but expected %v", v, max) - } - - if v := int(s.SignificantFigures); v != sigfigs { - t.Errorf("SignificantFigures was %v, but expected %v", v, sigfigs) - } - - if imported := hdrhistogram.Import(s); !imported.Equals(h) { - t.Error("Expected Histograms to be equivalent") - } - -} - -func TestEquals(t *testing.T) { - h1 := hdrhistogram.New(1, 10000000, 3) - for i := 0; i < 1000000; i++ { - if err := h1.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - h2 := hdrhistogram.New(1, 10000000, 3) - for i := 0; i < 10000; i++ { - if err := h1.RecordValue(int64(i)); err != nil { - t.Fatal(err) - } - } - - if h1.Equals(h2) { - t.Error("Expected Histograms to not be equivalent") - } - - h1.Reset() - h2.Reset() - - if !h1.Equals(h2) { - t.Error("Expected Histograms to be equivalent") - } -} diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window.go b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window.go deleted file mode 100644 index dc43612a4..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window.go +++ /dev/null @@ -1,45 +0,0 @@ -package hdrhistogram - -// A WindowedHistogram combines histograms to provide windowed statistics. -type WindowedHistogram struct { - idx int - h []Histogram - m *Histogram - - Current *Histogram -} - -// NewWindowed creates a new WindowedHistogram with N underlying histograms with -// the given parameters. -func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram { - w := WindowedHistogram{ - idx: -1, - h: make([]Histogram, n), - m: New(minValue, maxValue, sigfigs), - } - - for i := range w.h { - w.h[i] = *New(minValue, maxValue, sigfigs) - } - w.Rotate() - - return &w -} - -// Merge returns a histogram which includes the recorded values from all the -// sections of the window. -func (w *WindowedHistogram) Merge() *Histogram { - w.m.Reset() - for _, h := range w.h { - w.m.Merge(&h) - } - return w.m -} - -// Rotate resets the oldest histogram and rotates it to be used as the current -// histogram. 
-func (w *WindowedHistogram) Rotate() { - w.idx++ - w.Current = &w.h[w.idx%len(w.h)] - w.Current.Reset() -} diff --git a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window_test.go b/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window_test.go deleted file mode 100644 index 7e787588a..000000000 --- a/Godeps/_workspace/src/github.com/codahale/hdrhistogram/window_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package hdrhistogram_test - -import ( - "testing" - - "github.com/codahale/hdrhistogram" -) - -func TestWindowedHistogram(t *testing.T) { - w := hdrhistogram.NewWindowed(2, 1, 1000, 3) - - for i := 0; i < 100; i++ { - w.Current.RecordValue(int64(i)) - } - w.Rotate() - - for i := 100; i < 200; i++ { - w.Current.RecordValue(int64(i)) - } - w.Rotate() - - for i := 200; i < 300; i++ { - w.Current.RecordValue(int64(i)) - } - - if v, want := w.Merge().ValueAtQuantile(50), int64(199); v != want { - t.Errorf("Median was %v, but expected %v", v, want) - } -} - -func BenchmarkWindowedHistogramRecordAndRotate(b *testing.B) { - w := hdrhistogram.NewWindowed(3, 1, 10000000, 3) - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - if err := w.Current.RecordValue(100); err != nil { - b.Fatal(err) - } - - if i%100000 == 1 { - w.Rotate() - } - } -} - -func BenchmarkWindowedHistogramMerge(b *testing.B) { - w := hdrhistogram.NewWindowed(3, 1, 10000000, 3) - for i := 0; i < 10000000; i++ { - if err := w.Current.RecordValue(100); err != nil { - b.Fatal(err) - } - - if i%100000 == 1 { - w.Rotate() - } - } - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - w.Merge() - } -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/LICENSE b/Godeps/_workspace/src/github.com/codegangsta/negroni/LICENSE deleted file mode 100644 index 08b5e20ac..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Jeremy Saenz - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/README.md b/Godeps/_workspace/src/github.com/codegangsta/negroni/README.md deleted file mode 100644 index 9294d7055..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/README.md +++ /dev/null @@ -1,181 +0,0 @@ -# Negroni [![GoDoc](https://godoc.org/github.com/codegangsta/negroni?status.svg)](http://godoc.org/github.com/codegangsta/negroni) [![wercker status](https://app.wercker.com/status/13688a4a94b82d84a0b8d038c4965b61/s "wercker status")](https://app.wercker.com/project/bykey/13688a4a94b82d84a0b8d038c4965b61) - -Negroni is an idiomatic approach to web middleware in Go. It is tiny, non-intrusive, and encourages use of `net/http` Handlers. - -If you like the idea of [Martini](http://github.com/go-martini/martini), but you think it contains too much magic, then Negroni is a great fit. - - -Language Translations: -* [Português Brasileiro (pt_BR)](translations/README_pt_br.md) - -## Getting Started - -After installing Go and setting up your [GOPATH](http://golang.org/doc/code.html#GOPATH), create your first `.go` file. We'll call it `server.go`. - -~~~ go -package main - -import ( - "github.com/codegangsta/negroni" - "net/http" - "fmt" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - n := negroni.Classic() - n.UseHandler(mux) - n.Run(":3000") -} -~~~ - -Then install the Negroni package (**go 1.1** and greater is required): -~~~ -go get github.com/codegangsta/negroni -~~~ - -Then run your server: -~~~ -go run server.go -~~~ - -You will now have a Go net/http webserver running on `localhost:3000`. - -## Need Help? -If you have a question or feature request, [go ask the mailing list](https://groups.google.com/forum/#!forum/negroni-users). The GitHub issues for Negroni will be used exclusively for bug reports and pull requests. - -## Is Negroni a Framework? -Negroni is **not** a framework. It is a library that is designed to work directly with net/http. - -## Routing? -Negroni is BYOR (Bring your own Router). The Go community already has a number of great http routers available, Negroni tries to play well with all of them by fully supporting `net/http`. For instance, integrating with [Gorilla Mux](http://github.com/gorilla/mux) looks like so: - -~~~ go -router := mux.NewRouter() -router.HandleFunc("/", HomeHandler) - -n := negroni.New(Middleware1, Middleware2) -// Or use a middleware with the Use() function -n.Use(Middleware3) -// router goes last -n.UseHandler(router) - -n.Run(":3000") -~~~ - -## `negroni.Classic()` -`negroni.Classic()` provides some default middleware that is useful for most applications: - -* `negroni.Recovery` - Panic Recovery Middleware. -* `negroni.Logging` - Request/Response Logging Middleware. -* `negroni.Static` - Static File serving under the "public" directory. - -This makes it really easy to get started with some useful features from Negroni. - -## Handlers -Negroni provides a bidirectional middleware flow. This is done through the `negroni.Handler` interface: - -~~~ go -type Handler interface { - ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) -} -~~~ - -If a middleware hasn't already written to the ResponseWriter, it should call the next `http.HandlerFunc` in the chain to yield to the next middleware handler. 
This can be used for great good: - -~~~ go -func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - // do some stuff before - next(rw, r) - // do some stuff after -} -~~~ - -And you can map it to the handler chain with the `Use` function: - -~~~ go -n := negroni.New() -n.Use(negroni.HandlerFunc(MyMiddleware)) -~~~ - -You can also map plain old `http.Handler`s: - -~~~ go -n := negroni.New() - -mux := http.NewServeMux() -// map your routes - -n.UseHandler(mux) - -n.Run(":3000") -~~~ - -## `Run()` -Negroni has a convenience function called `Run`. `Run` takes an addr string identical to [http.ListenAndServe](http://golang.org/pkg/net/http#ListenAndServe). - -~~~ go -n := negroni.Classic() -// ... -log.Fatal(http.ListenAndServe(":8080", n)) -~~~ - -## Route Specific Middleware -If you have a route group of routes that need specific middleware to be executed, you can simply create a new Negroni instance and use it as your route handler. - -~~~ go -router := mux.NewRouter() -adminRoutes := mux.NewRouter() -// add admin routes here - -// Create a new negroni for the admin middleware -router.Handle("/admin", negroni.New( - Middleware1, - Middleware2, - negroni.Wrap(adminRoutes), -)) -~~~ - -## Third Party Middleware - -Here is a current list of Negroni compatible middlware. Feel free to put up a PR linking your middleware if you have built one: - - -| Middleware | Author | Description | -| -----------|--------|-------------| -| [RestGate](https://github.com/pjebs/restgate) | [Prasanga Siripala](https://github.com/pjebs) | Secure authentication for REST API endpoints | -| [Graceful](https://github.com/stretchr/graceful) | [Tyler Bunnell](https://github.com/tylerb) | Graceful HTTP Shutdown | -| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | Middleware that implements a few quick security wins | -| [JWT Middleware](https://github.com/auth0/go-jwt-middleware) | [Auth0](https://github.com/auth0) | Middleware checks for a JWT on the `Authorization` header on incoming requests and decodes it| -| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | Data binding from HTTP requests into structs | -| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | Logrus-based logger | -| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | Render JSON, XML and HTML templates | -| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | New Relic agent for Go runtime | -| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | GZIP response compression | -| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | oAuth2 middleware | -| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Session Management | -| [permissions2](https://github.com/xyproto/permissions2) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, users and permissions | -| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | Generate TinySVG, HTML and CSS on the fly | -| [cors](https://github.com/rs/cors) | [Olivier Poitrey](https://github.com/rs) | [Cross Origin Resource Sharing](http://www.w3.org/TR/cors/) (CORS) support | -| [xrequestid](https://github.com/pilu/xrequestid) | [Andrea 
Franz](https://github.com/pilu) | Middleware that assigns a random X-Request-Id header to each request | -| [VanGoH](https://github.com/auroratechnologies/vangoh) | [Taylor Wrobel](https://github.com/twrobel3) | Configurable [AWS-Style](http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html) HMAC authentication middleware | -| [stats](https://github.com/thoas/stats) | [Florent Messa](https://github.com/thoas) | Store information about your web application (response time, etc.) | - -## Examples -[Alexander Rødseth](https://github.com/xyproto) created [mooseware](https://github.com/xyproto/mooseware), a skeleton for writing a Negroni middleware handler. - -## Live code reload? -[gin](https://github.com/codegangsta/gin) and [fresh](https://github.com/pilu/fresh) both live reload negroni apps. - -## Essential Reading for Beginners of Go & Negroni - -* [Using a Context to pass information from middleware to end handler](http://elithrar.github.io/article/map-string-interface/) -* [Understanding middleware](http://mattstauffer.co/blog/laravel-5.0-middleware-replacing-filters) - -## About - -Negroni is obsessively designed by none other than the [Code Gangsta](http://codegangsta.io/) diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/doc.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/doc.go deleted file mode 100644 index 24d6572c2..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -// Package negroni is an idiomatic approach to web middleware in Go. It is tiny, non-intrusive, and encourages use of net/http Handlers. -// -// If you like the idea of Martini, but you think it contains too much magic, then Negroni is a great fit. -// -// For a full guide visit http://github.com/codegangsta/negroni -// -// package main -// -// import ( -// "github.com/codegangsta/negroni" -// "net/http" -// "fmt" -// ) -// -// func main() { -// mux := http.NewServeMux() -// mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { -// fmt.Fprintf(w, "Welcome to the home page!") -// }) -// -// n := negroni.Classic() -// n.UseHandler(mux) -// n.Run(":3000") -// } -package negroni diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/logger.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/logger.go deleted file mode 100644 index e3828ef31..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/logger.go +++ /dev/null @@ -1,29 +0,0 @@ -package negroni - -import ( - "log" - "net/http" - "os" - "time" -) - -// Logger is a middleware handler that logs the request as it goes in and the response as it goes out. 
-type Logger struct { - // Logger inherits from log.Logger used to log messages with the Logger middleware - *log.Logger -} - -// NewLogger returns a new Logger instance -func NewLogger() *Logger { - return &Logger{log.New(os.Stdout, "[negroni] ", 0)} -} - -func (l *Logger) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - start := time.Now() - l.Printf("Started %s %s", r.Method, r.URL.Path) - - next(rw, r) - - res := rw.(ResponseWriter) - l.Printf("Completed %v %s in %v", res.Status(), http.StatusText(res.Status()), time.Since(start)) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/logger_test.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/logger_test.go deleted file mode 100644 index 880337d1a..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/logger_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package negroni - -import ( - "bytes" - "log" - "net/http" - "net/http/httptest" - "testing" -) - -func Test_Logger(t *testing.T) { - buff := bytes.NewBufferString("") - recorder := httptest.NewRecorder() - - l := NewLogger() - l.Logger = log.New(buff, "[negroni] ", 0) - - n := New() - // replace log for testing - n.Use(l) - n.UseHandler(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.WriteHeader(http.StatusNotFound) - })) - - req, err := http.NewRequest("GET", "http://localhost:3000/foobar", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(recorder, req) - expect(t, recorder.Code, http.StatusNotFound) - refute(t, len(buff.String()), 0) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/negroni.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/negroni.go deleted file mode 100644 index 57d15eb79..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/negroni.go +++ /dev/null @@ -1,129 +0,0 @@ -package negroni - -import ( - "log" - "net/http" - "os" -) - -// Handler handler is an interface that objects can implement to be registered to serve as middleware -// in the Negroni middleware stack. -// ServeHTTP should yield to the next middleware in the chain by invoking the next http.HandlerFunc -// passed in. -// -// If the Handler writes to the ResponseWriter, the next http.HandlerFunc should not be invoked. -type Handler interface { - ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) -} - -// HandlerFunc is an adapter to allow the use of ordinary functions as Negroni handlers. -// If f is a function with the appropriate signature, HandlerFunc(f) is a Handler object that calls f. -type HandlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) - -func (h HandlerFunc) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - h(rw, r, next) -} - -type middleware struct { - handler Handler - next *middleware -} - -func (m middleware) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - m.handler.ServeHTTP(rw, r, m.next.ServeHTTP) -} - -// Wrap converts a http.Handler into a negroni.Handler so it can be used as a Negroni -// middleware. The next http.HandlerFunc is automatically called after the Handler -// is executed. -func Wrap(handler http.Handler) Handler { - return HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - handler.ServeHTTP(rw, r) - next(rw, r) - }) -} - -// Negroni is a stack of Middleware Handlers that can be invoked as an http.Handler. 
-// Negroni middleware is evaluated in the order that they are added to the stack using -// the Use and UseHandler methods. -type Negroni struct { - middleware middleware - handlers []Handler -} - -// New returns a new Negroni instance with no middleware preconfigured. -func New(handlers ...Handler) *Negroni { - return &Negroni{ - handlers: handlers, - middleware: build(handlers), - } -} - -// Classic returns a new Negroni instance with the default middleware already -// in the stack. -// -// Recovery - Panic Recovery Middleware -// Logger - Request/Response Logging -// Static - Static File Serving -func Classic() *Negroni { - return New(NewRecovery(), NewLogger(), NewStatic(http.Dir("public"))) -} - -func (n *Negroni) ServeHTTP(rw http.ResponseWriter, r *http.Request) { - n.middleware.ServeHTTP(NewResponseWriter(rw), r) -} - -// Use adds a Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni. -func (n *Negroni) Use(handler Handler) { - n.handlers = append(n.handlers, handler) - n.middleware = build(n.handlers) -} - -// UseFunc adds a Negroni-style handler function onto the middleware stack. -func (n *Negroni) UseFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc)) { - n.Use(HandlerFunc(handlerFunc)) -} - -// UseHandler adds a http.Handler onto the middleware stack. Handlers are invoked in the order they are added to a Negroni. -func (n *Negroni) UseHandler(handler http.Handler) { - n.Use(Wrap(handler)) -} - -// UseHandler adds a http.HandlerFunc-style handler function onto the middleware stack. -func (n *Negroni) UseHandlerFunc(handlerFunc func(rw http.ResponseWriter, r *http.Request)) { - n.UseHandler(http.HandlerFunc(handlerFunc)) -} - -// Run is a convenience function that runs the negroni stack as an HTTP -// server. The addr string takes the same format as http.ListenAndServe. -func (n *Negroni) Run(addr string) { - l := log.New(os.Stdout, "[negroni] ", 0) - l.Printf("listening on %s", addr) - l.Fatal(http.ListenAndServe(addr, n)) -} - -// Returns a list of all the handlers in the current Negroni middleware chain. 
-func (n *Negroni) Handlers() []Handler { - return n.handlers -} - -func build(handlers []Handler) middleware { - var next middleware - - if len(handlers) == 0 { - return voidMiddleware() - } else if len(handlers) > 1 { - next = build(handlers[1:]) - } else { - next = voidMiddleware() - } - - return middleware{handlers[0], &next} -} - -func voidMiddleware() middleware { - return middleware{ - HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) {}), - &middleware{}, - } -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/negroni_test.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/negroni_test.go deleted file mode 100644 index 9fac00856..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/negroni_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package negroni - -import ( - "net/http" - "net/http/httptest" - "reflect" - "testing" -) - -/* Test Helpers */ -func expect(t *testing.T, a interface{}, b interface{}) { - if a != b { - t.Errorf("Expected %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} - -func refute(t *testing.T, a interface{}, b interface{}) { - if a == b { - t.Errorf("Did not expect %v (type %v) - Got %v (type %v)", b, reflect.TypeOf(b), a, reflect.TypeOf(a)) - } -} - -func TestNegroniRun(t *testing.T) { - // just test that Run doesn't bomb - go New().Run(":3000") -} - -func TestNegroniServeHTTP(t *testing.T) { - result := "" - response := httptest.NewRecorder() - - n := New() - n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - result += "foo" - next(rw, r) - result += "ban" - })) - n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - result += "bar" - next(rw, r) - result += "baz" - })) - n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - result += "bat" - rw.WriteHeader(http.StatusBadRequest) - })) - - n.ServeHTTP(response, (*http.Request)(nil)) - - expect(t, result, "foobarbatbazban") - expect(t, response.Code, http.StatusBadRequest) -} - -// Ensures that a Negroni middleware chain -// can correctly return all of its handlers. -func TestHandlers(t *testing.T) { - response := httptest.NewRecorder() - n := New() - handlers := n.Handlers() - expect(t, 0, len(handlers)) - - n.Use(HandlerFunc(func(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - rw.WriteHeader(http.StatusOK) - })) - - // Expects the length of handlers to be exactly 1 - // after adding exactly one handler to the middleware chain - handlers = n.Handlers() - expect(t, 1, len(handlers)) - - // Ensures that the first handler that is in sequence behaves - // exactly the same as the one that was registered earlier - handlers[0].ServeHTTP(response, (*http.Request)(nil), nil) - expect(t, response.Code, http.StatusOK) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/recovery.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/recovery.go deleted file mode 100644 index d790cade6..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/recovery.go +++ /dev/null @@ -1,46 +0,0 @@ -package negroni - -import ( - "fmt" - "log" - "net/http" - "os" - "runtime" -) - -// Recovery is a Negroni middleware that recovers from any panics and writes a 500 if there was one. 
-type Recovery struct { - Logger *log.Logger - PrintStack bool - StackAll bool - StackSize int -} - -// NewRecovery returns a new instance of Recovery -func NewRecovery() *Recovery { - return &Recovery{ - Logger: log.New(os.Stdout, "[negroni] ", 0), - PrintStack: true, - StackAll: false, - StackSize: 1024 * 8, - } -} - -func (rec *Recovery) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - defer func() { - if err := recover(); err != nil { - rw.WriteHeader(http.StatusInternalServerError) - stack := make([]byte, rec.StackSize) - stack = stack[:runtime.Stack(stack, rec.StackAll)] - - f := "PANIC: %s\n%s" - rec.Logger.Printf(f, err, stack) - - if rec.PrintStack { - fmt.Fprintf(rw, f, err, stack) - } - } - }() - - next(rw, r) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/recovery_test.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/recovery_test.go deleted file mode 100644 index 3fa264acb..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/recovery_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package negroni - -import ( - "bytes" - "log" - "net/http" - "net/http/httptest" - "testing" -) - -func TestRecovery(t *testing.T) { - buff := bytes.NewBufferString("") - recorder := httptest.NewRecorder() - - rec := NewRecovery() - rec.Logger = log.New(buff, "[negroni] ", 0) - - n := New() - // replace log for testing - n.Use(rec) - n.UseHandler(http.HandlerFunc(func(res http.ResponseWriter, req *http.Request) { - panic("here is a panic!") - })) - n.ServeHTTP(recorder, (*http.Request)(nil)) - expect(t, recorder.Code, http.StatusInternalServerError) - refute(t, recorder.Body.Len(), 0) - refute(t, len(buff.String()), 0) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/response_writer.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/response_writer.go deleted file mode 100644 index ea86a2657..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/response_writer.go +++ /dev/null @@ -1,96 +0,0 @@ -package negroni - -import ( - "bufio" - "fmt" - "net" - "net/http" -) - -// ResponseWriter is a wrapper around http.ResponseWriter that provides extra information about -// the response. It is recommended that middleware handlers use this construct to wrap a responsewriter -// if the functionality calls for it. -type ResponseWriter interface { - http.ResponseWriter - http.Flusher - // Status returns the status code of the response or 0 if the response has not been written. - Status() int - // Written returns whether or not the ResponseWriter has been written. - Written() bool - // Size returns the size of the response body. - Size() int - // Before allows for a function to be called before the ResponseWriter has been written to. This is - // useful for setting headers or any other operations that must happen before a response has been written. 
- Before(func(ResponseWriter)) -} - -type beforeFunc func(ResponseWriter) - -// NewResponseWriter creates a ResponseWriter that wraps an http.ResponseWriter -func NewResponseWriter(rw http.ResponseWriter) ResponseWriter { - return &responseWriter{rw, 0, 0, nil} -} - -type responseWriter struct { - http.ResponseWriter - status int - size int - beforeFuncs []beforeFunc -} - -func (rw *responseWriter) WriteHeader(s int) { - rw.status = s - rw.callBefore() - rw.ResponseWriter.WriteHeader(s) -} - -func (rw *responseWriter) Write(b []byte) (int, error) { - if !rw.Written() { - // The status will be StatusOK if WriteHeader has not been called yet - rw.WriteHeader(http.StatusOK) - } - size, err := rw.ResponseWriter.Write(b) - rw.size += size - return size, err -} - -func (rw *responseWriter) Status() int { - return rw.status -} - -func (rw *responseWriter) Size() int { - return rw.size -} - -func (rw *responseWriter) Written() bool { - return rw.status != 0 -} - -func (rw *responseWriter) Before(before func(ResponseWriter)) { - rw.beforeFuncs = append(rw.beforeFuncs, before) -} - -func (rw *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hijacker, ok := rw.ResponseWriter.(http.Hijacker) - if !ok { - return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface") - } - return hijacker.Hijack() -} - -func (rw *responseWriter) CloseNotify() <-chan bool { - return rw.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (rw *responseWriter) callBefore() { - for i := len(rw.beforeFuncs) - 1; i >= 0; i-- { - rw.beforeFuncs[i](rw) - } -} - -func (rw *responseWriter) Flush() { - flusher, ok := rw.ResponseWriter.(http.Flusher) - if ok { - flusher.Flush() - } -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/response_writer_test.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/response_writer_test.go deleted file mode 100644 index ed1ee70a6..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/response_writer_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package negroni - -import ( - "bufio" - "net" - "net/http" - "net/http/httptest" - "testing" - "time" -) - -type closeNotifyingRecorder struct { - *httptest.ResponseRecorder - closed chan bool -} - -func newCloseNotifyingRecorder() *closeNotifyingRecorder { - return &closeNotifyingRecorder{ - httptest.NewRecorder(), - make(chan bool, 1), - } -} - -func (c *closeNotifyingRecorder) close() { - c.closed <- true -} - -func (c *closeNotifyingRecorder) CloseNotify() <-chan bool { - return c.closed -} - -type hijackableResponse struct { - Hijacked bool -} - -func newHijackableResponse() *hijackableResponse { - return &hijackableResponse{} -} - -func (h *hijackableResponse) Header() http.Header { return nil } -func (h *hijackableResponse) Write(buf []byte) (int, error) { return 0, nil } -func (h *hijackableResponse) WriteHeader(code int) {} -func (h *hijackableResponse) Flush() {} -func (h *hijackableResponse) Hijack() (net.Conn, *bufio.ReadWriter, error) { - h.Hijacked = true - return nil, nil, nil -} - -func TestResponseWriterWritingString(t *testing.T) { - rec := httptest.NewRecorder() - rw := NewResponseWriter(rec) - - rw.Write([]byte("Hello world")) - - expect(t, rec.Code, rw.Status()) - expect(t, rec.Body.String(), "Hello world") - expect(t, rw.Status(), http.StatusOK) - expect(t, rw.Size(), 11) - expect(t, rw.Written(), true) -} - -func TestResponseWriterWritingStrings(t *testing.T) { - rec := httptest.NewRecorder() - rw := NewResponseWriter(rec) - - 
rw.Write([]byte("Hello world")) - rw.Write([]byte("foo bar bat baz")) - - expect(t, rec.Code, rw.Status()) - expect(t, rec.Body.String(), "Hello worldfoo bar bat baz") - expect(t, rw.Status(), http.StatusOK) - expect(t, rw.Size(), 26) -} - -func TestResponseWriterWritingHeader(t *testing.T) { - rec := httptest.NewRecorder() - rw := NewResponseWriter(rec) - - rw.WriteHeader(http.StatusNotFound) - - expect(t, rec.Code, rw.Status()) - expect(t, rec.Body.String(), "") - expect(t, rw.Status(), http.StatusNotFound) - expect(t, rw.Size(), 0) -} - -func TestResponseWriterBefore(t *testing.T) { - rec := httptest.NewRecorder() - rw := NewResponseWriter(rec) - result := "" - - rw.Before(func(ResponseWriter) { - result += "foo" - }) - rw.Before(func(ResponseWriter) { - result += "bar" - }) - - rw.WriteHeader(http.StatusNotFound) - - expect(t, rec.Code, rw.Status()) - expect(t, rec.Body.String(), "") - expect(t, rw.Status(), http.StatusNotFound) - expect(t, rw.Size(), 0) - expect(t, result, "barfoo") -} - -func TestResponseWriterHijack(t *testing.T) { - hijackable := newHijackableResponse() - rw := NewResponseWriter(hijackable) - hijacker, ok := rw.(http.Hijacker) - expect(t, ok, true) - _, _, err := hijacker.Hijack() - if err != nil { - t.Error(err) - } - expect(t, hijackable.Hijacked, true) -} - -func TestResponseWriteHijackNotOK(t *testing.T) { - hijackable := new(http.ResponseWriter) - rw := NewResponseWriter(*hijackable) - hijacker, ok := rw.(http.Hijacker) - expect(t, ok, true) - _, _, err := hijacker.Hijack() - - refute(t, err, nil) -} - -func TestResponseWriterCloseNotify(t *testing.T) { - rec := newCloseNotifyingRecorder() - rw := NewResponseWriter(rec) - closed := false - notifier := rw.(http.CloseNotifier).CloseNotify() - rec.close() - select { - case <-notifier: - closed = true - case <-time.After(time.Second): - } - expect(t, closed, true) -} - -func TestResponseWriterFlusher(t *testing.T) { - rec := httptest.NewRecorder() - rw := NewResponseWriter(rec) - - _, ok := rw.(http.Flusher) - expect(t, ok, true) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/static.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/static.go deleted file mode 100644 index c5af4e688..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/static.go +++ /dev/null @@ -1,84 +0,0 @@ -package negroni - -import ( - "net/http" - "path" - "strings" -) - -// Static is a middleware handler that serves static files in the given directory/filesystem. -type Static struct { - // Dir is the directory to serve static files from - Dir http.FileSystem - // Prefix is the optional prefix used to serve the static directory content - Prefix string - // IndexFile defines which file to serve as index if it exists. - IndexFile string -} - -// NewStatic returns a new instance of Static -func NewStatic(directory http.FileSystem) *Static { - return &Static{ - Dir: directory, - Prefix: "", - IndexFile: "index.html", - } -} - -func (s *Static) ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - if r.Method != "GET" && r.Method != "HEAD" { - next(rw, r) - return - } - file := r.URL.Path - // if we have a prefix, filter requests by stripping the prefix - if s.Prefix != "" { - if !strings.HasPrefix(file, s.Prefix) { - next(rw, r) - return - } - file = file[len(s.Prefix):] - if file != "" && file[0] != '/' { - next(rw, r) - return - } - } - f, err := s.Dir.Open(file) - if err != nil { - // discard the error? 
- next(rw, r) - return - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - next(rw, r) - return - } - - // try to serve index file - if fi.IsDir() { - // redirect if missing trailing slash - if !strings.HasSuffix(r.URL.Path, "/") { - http.Redirect(rw, r, r.URL.Path+"/", http.StatusFound) - return - } - - file = path.Join(file, s.IndexFile) - f, err = s.Dir.Open(file) - if err != nil { - next(rw, r) - return - } - defer f.Close() - - fi, err = f.Stat() - if err != nil || fi.IsDir() { - next(rw, r) - return - } - } - - http.ServeContent(rw, r, file, fi.ModTime(), f) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/static_test.go b/Godeps/_workspace/src/github.com/codegangsta/negroni/static_test.go deleted file mode 100644 index 637cfcd68..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/static_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package negroni - -import ( - "bytes" - "net/http" - "net/http/httptest" - "testing" -) - -func TestStatic(t *testing.T) { - response := httptest.NewRecorder() - response.Body = new(bytes.Buffer) - - n := New() - n.Use(NewStatic(http.Dir("."))) - - req, err := http.NewRequest("GET", "http://localhost:3000/negroni.go", nil) - if err != nil { - t.Error(err) - } - n.ServeHTTP(response, req) - expect(t, response.Code, http.StatusOK) - expect(t, response.Header().Get("Expires"), "") - if response.Body.Len() == 0 { - t.Errorf("Got empty body for GET request") - } -} - -func TestStaticHead(t *testing.T) { - response := httptest.NewRecorder() - response.Body = new(bytes.Buffer) - - n := New() - n.Use(NewStatic(http.Dir("."))) - n.UseHandler(http.NotFoundHandler()) - - req, err := http.NewRequest("HEAD", "http://localhost:3000/negroni.go", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(response, req) - expect(t, response.Code, http.StatusOK) - if response.Body.Len() != 0 { - t.Errorf("Got non-empty body for HEAD request") - } -} - -func TestStaticAsPost(t *testing.T) { - response := httptest.NewRecorder() - - n := New() - n.Use(NewStatic(http.Dir("."))) - n.UseHandler(http.NotFoundHandler()) - - req, err := http.NewRequest("POST", "http://localhost:3000/negroni.go", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(response, req) - expect(t, response.Code, http.StatusNotFound) -} - -func TestStaticBadDir(t *testing.T) { - response := httptest.NewRecorder() - - n := Classic() - n.UseHandler(http.NotFoundHandler()) - - req, err := http.NewRequest("GET", "http://localhost:3000/negroni.go", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(response, req) - refute(t, response.Code, http.StatusOK) -} - -func TestStaticOptionsServeIndex(t *testing.T) { - response := httptest.NewRecorder() - - n := New() - s := NewStatic(http.Dir(".")) - s.IndexFile = "negroni.go" - n.Use(s) - - req, err := http.NewRequest("GET", "http://localhost:3000/", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(response, req) - expect(t, response.Code, http.StatusOK) -} - -func TestStaticOptionsPrefix(t *testing.T) { - response := httptest.NewRecorder() - - n := New() - s := NewStatic(http.Dir(".")) - s.Prefix = "/public" - n.Use(s) - - // Check file content behaviour - req, err := http.NewRequest("GET", "http://localhost:3000/public/negroni.go", nil) - if err != nil { - t.Error(err) - } - - n.ServeHTTP(response, req) - expect(t, response.Code, http.StatusOK) -} diff --git a/Godeps/_workspace/src/github.com/codegangsta/negroni/translations/README_pt_br.md 
b/Godeps/_workspace/src/github.com/codegangsta/negroni/translations/README_pt_br.md deleted file mode 100644 index d5b02fa30..000000000 --- a/Godeps/_workspace/src/github.com/codegangsta/negroni/translations/README_pt_br.md +++ /dev/null @@ -1,170 +0,0 @@ -# Negroni [![GoDoc](https://godoc.org/github.com/codegangsta/negroni?status.svg)](http://godoc.org/github.com/codegangsta/negroni) [![wercker status](https://app.wercker.com/status/13688a4a94b82d84a0b8d038c4965b61/s "wercker status")](https://app.wercker.com/project/bykey/13688a4a94b82d84a0b8d038c4965b61) - -Negroni é uma abordagem idiomática para middleware web em Go. É pequeno, não intrusivo, e incentiva uso da biblioteca `net/http`. - -Se gosta da idéia do [Martini](http://github.com/go-martini/martini), mas acha que contém muita mágica, então Negroni é ideal. - -## Começando - -Depois de instalar Go e definir seu [GOPATH](http://golang.org/doc/code.html#GOPATH), criar seu primeirto arquivo `.go`. Iremos chamá-lo `server.go`. - -~~~ go -package main - -import ( - "github.com/codegangsta/negroni" - "net/http" - "fmt" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - n := negroni.Classic() - n.UseHandler(mux) - n.Run(":3000") -} -~~~ - -Depois instale o pacote Negroni (**go 1.1** ou superior) -~~~ -go get github.com/codegangsta/negroni -~~~ - -Depois execute seu servidor: -~~~ -go run server.go -~~~ - -Agora terá um servidor web Go net/http rodando em `localhost:3000`. - -## Precisa de Ajuda? -Se você tem uma pergunta ou pedido de recurso,[go ask the mailing list](https://groups.google.com/forum/#!forum/negroni-users). O Github issues para o Negroni será usado exclusivamente para Reportar bugs e pull requests. - -## Negroni é um Framework? -Negroni **não** é a framework. É uma biblioteca que é desenhada para trabalhar diretamente com net/http. - -## Roteamento? -Negroni é TSPR(Traga seu próprio Roteamento). A comunidade Go já tem um grande número de roteadores http disponíveis, Negroni tenta rodar bem com todos eles pelo suporte total `net/http`/ Por exemplo, a integração com [Gorilla Mux](http://github.com/gorilla/mux) se parece com isso: - -~~~ go -router := mux.NewRouter() -router.HandleFunc("/", HomeHandler) - -n := negroni.New(Middleware1, Middleware2) -// Or use a middleware with the Use() function -n.Use(Middleware3) -// router goes last -n.UseHandler(router) - -n.Run(":3000") -~~~ - -## `negroni.Classic()` -`negroni.Classic()` fornece alguns middlewares padrão que são úteis para maioria das aplicações: - -* `negroni.Recovery` - Panic Recovery Middleware. -* `negroni.Logging` - Request/Response Logging Middleware. -* `negroni.Static` - Static File serving under the "public" directory. - -Isso torna muito fácil começar com alguns recursos úteis do Negroni. - -## Handlers -Negroni fornece um middleware de fluxo bidirecional. Isso é feito através da interface `negroni.Handler`: - -~~~ go -type Handler interface { - ServeHTTP(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) -} -~~~ - -Se um middleware não tenha escrito o ResponseWriter, ele deve chamar a próxima `http.HandlerFunc` na cadeia para produzir o próximo handler middleware. 
Isso pode ser usado muito bem: - -~~~ go -func MyMiddleware(rw http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - // do some stuff before - next(rw, r) - // do some stuff after -} -~~~ - -E pode mapear isso para a cadeia de handler com a função `Use`: - -~~~ go -n := negroni.New() -n.Use(negroni.HandlerFunc(MyMiddleware)) -~~~ - -Você também pode mapear `http.Handler` antigos: - -~~~ go -n := negroni.New() - -mux := http.NewServeMux() -// map your routes - -n.UseHandler(mux) - -n.Run(":3000") -~~~ - -## `Run()` -Negroni tem uma função de conveniência chamada `Run`. `Run` pega um endereço de string idêntico para [http.ListenAndServe](http://golang.org/pkg/net/http#ListenAndServe). - -~~~ go -n := negroni.Classic() -// ... -log.Fatal(http.ListenAndServe(":8080", n)) -~~~ - -## Middleware para Rotas Específicas -Se você tem um grupo de rota com rotas que precisam ser executadas por um middleware específico, pode simplesmente criar uma nova instância de Negroni e usar no seu Manipulador de rota. - -~~~ go -router := mux.NewRouter() -adminRoutes := mux.NewRouter() -// add admin routes here - -// Criar um middleware negroni para admin -router.Handle("/admin", negroni.New( - Middleware1, - Middleware2, - negroni.Wrap(adminRoutes), -)) -~~~ - -## Middleware de Terceiros - -Aqui está uma lista atual de Middleware Compatíveis com Negroni. Sinta se livre para mandar um PR vinculando seu middleware se construiu um: - - -| Middleware | Autor | Descrição | -| -----------|--------|-------------| -| [Graceful](https://github.com/stretchr/graceful) | [Tyler Bunnell](https://github.com/tylerb) | Graceful HTTP Shutdown | -| [secure](https://github.com/unrolled/secure) | [Cory Jacobsen](https://github.com/unrolled) | Implementa rapidamente itens de segurança.| -| [binding](https://github.com/mholt/binding) | [Matt Holt](https://github.com/mholt) | Handler para mapeamento/validação de um request a estrutura. | -| [logrus](https://github.com/meatballhat/negroni-logrus) | [Dan Buch](https://github.com/meatballhat) | Logrus-based logger | -| [render](https://github.com/unrolled/render) | [Cory Jacobsen](https://github.com/unrolled) | Pacote para renderizar JSON, XML, e templates HTML. | -| [gorelic](https://github.com/jingweno/negroni-gorelic) | [Jingwen Owen Ou](https://github.com/jingweno) | New Relic agent for Go runtime | -| [gzip](https://github.com/phyber/negroni-gzip) | [phyber](https://github.com/phyber) | Handler para adicionar compreção gzip para as requisições | -| [oauth2](https://github.com/goincremental/negroni-oauth2) | [David Bochenski](https://github.com/bochenski) | Handler que prove sistema de login OAuth 2.0 para aplicações Martini. Google Sign-in, Facebook Connect e Github login são suportados. | -| [sessions](https://github.com/goincremental/negroni-sessions) | [David Bochenski](https://github.com/bochenski) | Handler que provê o serviço de sessão. | -| [permissions](https://github.com/xyproto/permissions) | [Alexander Rødseth](https://github.com/xyproto) | Cookies, usuários e permissões. | -| [onthefly](https://github.com/xyproto/onthefly) | [Alexander Rødseth](https://github.com/xyproto) | Pacote para gerar TinySVG, HTML e CSS em tempo real. | - -## Exemplos -[Alexander Rødseth](https://github.com/xyproto) criou [mooseware](https://github.com/xyproto/mooseware), uma estrutura para escrever um handler middleware Negroni. - -## Servidor com autoreload? -[gin](https://github.com/codegangsta/gin) e [fresh](https://github.com/pilu/fresh) são aplicativos para autoreload do Negroni. 
- -## Leitura Essencial para Iniciantes em Go & Negroni -* [Usando um contexto para passar informação de um middleware para o manipulador final](http://elithrar.github.io/article/map-string-interface/) -* [Entendendo middleware](http://mattstauffer.co/blog/laravel-5.0-middleware-replacing-filters) - - -## Sobre -Negroni é obsessivamente desenhado por ninguém menos que [Code Gangsta](http://codegangsta.io/) diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE deleted file mode 100644 index 5782c7269..000000000 --- a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2014, Elazar Leibovich -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md deleted file mode 100644 index 795d3d320..000000000 --- a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# go-bindata-assetfs - -Serve embedded files from [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) with `net/http`. - -[GoDoc](http://godoc.org/github.com/elazarl/go-bindata-assetfs) - -### Installation - -Install with - - $ go get github.com/jteeuwen/go-bindata/... - $ go get github.com/elazarl/go-bindata-assetfs/... - -### Creating embedded data - -Usage is identical to [jteeuwen/go-bindata](https://github.com/jteeuwen/go-bindata) usage, -instead of running `go-bindata` run `go-bindata-assetfs`. - -The tool will create a `bindata_assetfs.go` file, which contains the embedded data. - -A typical use case is - - $ go-bindata-assetfs data/... - -### Using assetFS in your code - -The generated file provides an `assetFS()` function that returns a `http.Filesystem` -wrapping the embedded files. What you usually want to do is: - - http.Handle("/", http.FileServer(assetFS())) - -This would run an HTTP server serving the embedded files. - -## Without running binary tool - -You can always just run the `go-bindata` tool, and then - -use - - import "github.com/elazarl/go-bindata-assetfs" - ... 
- http.Handle("/", - http.FileServer( - &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"})) - -to serve files embedded from the `data` directory. diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go deleted file mode 100644 index 5174d5a6d..000000000 --- a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/assetfs.go +++ /dev/null @@ -1,147 +0,0 @@ -package assetfs - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "os" - "path" - "path/filepath" - "time" -) - -var ( - fileTimestamp = time.Now() -) - -// FakeFile implements os.FileInfo interface for a given path and size -type FakeFile struct { - // Path is the path of this file - Path string - // Dir marks of the path is a directory - Dir bool - // Len is the length of the fake file, zero if it is a directory - Len int64 -} - -func (f *FakeFile) Name() string { - _, name := filepath.Split(f.Path) - return name -} - -func (f *FakeFile) Mode() os.FileMode { - mode := os.FileMode(0644) - if f.Dir { - return mode | os.ModeDir - } - return mode -} - -func (f *FakeFile) ModTime() time.Time { - return fileTimestamp -} - -func (f *FakeFile) Size() int64 { - return f.Len -} - -func (f *FakeFile) IsDir() bool { - return f.Mode().IsDir() -} - -func (f *FakeFile) Sys() interface{} { - return nil -} - -// AssetFile implements http.File interface for a no-directory file with content -type AssetFile struct { - *bytes.Reader - io.Closer - FakeFile -} - -func NewAssetFile(name string, content []byte) *AssetFile { - return &AssetFile{ - bytes.NewReader(content), - ioutil.NopCloser(nil), - FakeFile{name, false, int64(len(content))}} -} - -func (f *AssetFile) Readdir(count int) ([]os.FileInfo, error) { - return nil, errors.New("not a directory") -} - -func (f *AssetFile) Size() int64 { - return f.FakeFile.Size() -} - -func (f *AssetFile) Stat() (os.FileInfo, error) { - return f, nil -} - -// AssetDirectory implements http.File interface for a directory -type AssetDirectory struct { - AssetFile - ChildrenRead int - Children []os.FileInfo -} - -func NewAssetDirectory(name string, children []string, fs *AssetFS) *AssetDirectory { - fileinfos := make([]os.FileInfo, 0, len(children)) - for _, child := range children { - _, err := fs.AssetDir(filepath.Join(name, child)) - fileinfos = append(fileinfos, &FakeFile{child, err == nil, 0}) - } - return &AssetDirectory{ - AssetFile{ - bytes.NewReader(nil), - ioutil.NopCloser(nil), - FakeFile{name, true, 0}, - }, - 0, - fileinfos} -} - -func (f *AssetDirectory) Readdir(count int) ([]os.FileInfo, error) { - if count <= 0 { - return f.Children, nil - } - if f.ChildrenRead+count > len(f.Children) { - count = len(f.Children) - f.ChildrenRead - } - rv := f.Children[f.ChildrenRead : f.ChildrenRead+count] - f.ChildrenRead += count - return rv, nil -} - -func (f *AssetDirectory) Stat() (os.FileInfo, error) { - return f, nil -} - -// AssetFS implements http.FileSystem, allowing -// embedded files to be served from net/http package. 
-type AssetFS struct { - // Asset should return content of file in path if exists - Asset func(path string) ([]byte, error) - // AssetDir should return list of files in the path - AssetDir func(path string) ([]string, error) - // Prefix would be prepended to http requests - Prefix string -} - -func (fs *AssetFS) Open(name string) (http.File, error) { - name = path.Join(fs.Prefix, name) - if len(name) > 0 && name[0] == '/' { - name = name[1:] - } - if b, err := fs.Asset(name); err == nil { - return NewAssetFile(name, b), nil - } - if children, err := fs.AssetDir(name); err == nil { - return NewAssetDirectory(name, children, fs), nil - } else { - return nil, err - } -} diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go deleted file mode 100644 index a664249f3..000000000 --- a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/doc.go +++ /dev/null @@ -1,13 +0,0 @@ -// assetfs allows packages to serve static content embedded -// with the go-bindata tool with the standard net/http package. -// -// See https://github.com/jteeuwen/go-bindata for more information -// about embedding binary data with go-bindata. -// -// Usage example, after running -// $ go-bindata data/... -// use: -// http.Handle("/", -// http.FileServer( -// &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: "data"})) -package assetfs diff --git a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go b/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go deleted file mode 100644 index a5b2b5eef..000000000 --- a/Godeps/_workspace/src/github.com/elazarl/go-bindata-assetfs/go-bindata-assetfs/main.go +++ /dev/null @@ -1,97 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "flag" - "fmt" - "os" - "os/exec" - "strings" -) - -const bindatafile = "bindata.go" - -func isDebug(args []string) bool { - flagset := flag.NewFlagSet("", flag.ContinueOnError) - debug := flagset.Bool("debug", false, "") - debugArgs := make([]string, 0) - for _, arg := range args { - if strings.HasPrefix(arg, "-debug") { - debugArgs = append(debugArgs, arg) - } - } - flagset.Parse(debugArgs) - if debug == nil { - return false - } - return *debug -} - -func main() { - if _, err := exec.LookPath("go-bindata"); err != nil { - fmt.Println("Cannot find go-bindata executable in path") - fmt.Println("Maybe you need: go get github.com/elazarl/go-bindata-assetfs/...") - os.Exit(1) - } - cmd := exec.Command("go-bindata", os.Args[1:]...) 
- cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - if err := cmd.Run(); err != nil { - os.Exit(1) - } - in, err := os.Open(bindatafile) - if err != nil { - fmt.Fprintln(os.Stderr, "Cannot read", bindatafile, err) - return - } - out, err := os.Create("bindata_assetfs.go") - if err != nil { - fmt.Fprintln(os.Stderr, "Cannot write 'bindata_assetfs.go'", err) - return - } - debug := isDebug(os.Args[1:]) - r := bufio.NewReader(in) - done := false - for line, isPrefix, err := r.ReadLine(); err == nil; line, isPrefix, err = r.ReadLine() { - if !isPrefix { - line = append(line, '\n') - } - if _, err := out.Write(line); err != nil { - fmt.Fprintln(os.Stderr, "Cannot write to 'bindata_assetfs.go'", err) - return - } - if !done && !isPrefix && bytes.HasPrefix(line, []byte("import (")) { - if debug { - fmt.Fprintln(out, "\t\"net/http\"") - } else { - fmt.Fprintln(out, "\t\"github.com/elazarl/go-bindata-assetfs\"") - } - done = true - } - } - if debug { - fmt.Fprintln(out, ` -func assetFS() http.FileSystem { - for k := range _bintree.Children { - return http.Dir(k) - } - panic("unreachable") -}`) - } else { - fmt.Fprintln(out, ` -func assetFS() *assetfs.AssetFS { - for k := range _bintree.Children { - return &assetfs.AssetFS{Asset: Asset, AssetDir: AssetDir, Prefix: k} - } - panic("unreachable") -}`) - } - // Close files BEFORE remove calls (don't use defer). - in.Close() - out.Close() - if err := os.Remove(bindatafile); err != nil { - fmt.Fprintln(os.Stderr, "Cannot remove", bindatafile, err) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore deleted file mode 100644 index 5f6b48eae..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# temporary symlink for testing -testing/data/symlink diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml deleted file mode 100644 index 927a999f0..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -sudo: false -go: - - 1.3.3 - - 1.4.2 - - 1.5.1 - - tip -env: - - GOARCH=amd64 - - GOARCH=386 -script: - - make test diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS deleted file mode 100644 index 418a808fb..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS +++ /dev/null @@ -1,99 +0,0 @@ -# This is the official list of go-dockerclient authors for copyright purposes. 
- -Adam Bell-Hanssen -Aldrin Leal -Andreas Jaekle -Andrews Medina -Artem Sidorenko -Andy Goldstein -Ben Marini -Ben McCann -Brendan Fosberry -Brian Lalor -Brian Palmer -Bryan Boreham -Burke Libbey -Carlos Diaz-Padron -Cesar Wong -Cezar Sa Espinola -Cheah Chu Yeow -cheneydeng -CMGS -Craig Jellick -Dan Williams -Daniel, Dao Quang Minh -Daniel Garcia -Darren Shepherd -Dave Choi -David Huie -Dawn Chen -Dinesh Subhraveti -Ed -Erez Horev -Eric Anderson -Ewout Prangsma -Fabio Rehm -Fatih Arslan -Flavia Missi -Francisco Souza -Grégoire Delattre -Guillermo Álvarez Fernández -He Simei -Ivan Mikushin -James Bardin -Jari Kolehmainen -Jason Wilder -Jawher Moussa -Jean-Baptiste Dalido -Jeff Mitchell -Jeffrey Hulten -Jen Andre -Johan Euphrosine -Kamil Domanski -Karan Misra -Kim, Hirokuni -Kyle Allan -Liron Levin -Liu Peng -Lorenz Leutgeb -Lucas Clemente -Lucas Weiblen -Mantas Matelis -Martin Sweeney -Máximo Cuadros Ortiz -Michal Fojtik -Mike Dillon -Mrunal Patel -Nick Ethier -Omeid Matten -Orivej Desh -Paul Bellamy -Paul Morie -Paul Weil -Peter Edge -Peter Jihoon Kim -Phil Lu -Philippe Lafoucrière -Rafe Colton -Rob Miller -Robert Williamson -Salvador Gironès -Sam Rijs -Samuel Karp -Simon Eskildsen -Simon Menke -Skolos -Soulou -Sridhar Ratnakumar -Summer Mousa -Sunjin Lee -Tarsis Azevedo -Tim Schindler -Tobi Knaup -Tonic -ttyh061 -Victor Marmol -Vincenzo Prignano -Wiliam Souza -Ye Yin -Yuriy Bogdanov diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE deleted file mode 100644 index 706634474..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/DOCKER-LICENSE +++ /dev/null @@ -1,6 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -You can find the Docker license at the following link: -https://raw.githubusercontent.com/docker/docker/master/LICENSE diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE deleted file mode 100644 index 4e11de100..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2015, go-dockerclient authors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile deleted file mode 100644 index b8c2c99b3..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/Makefile +++ /dev/null @@ -1,47 +0,0 @@ -.PHONY: \ - all \ - vendor \ - lint \ - vet \ - fmt \ - fmtcheck \ - pretest \ - test \ - cov \ - clean - -SRCS = $(shell git ls-files '*.go' | grep -v '^external/') -PKGS = ./. ./testing - -all: test - -vendor: - @ go get -v github.com/mjibson/party - party -d external -c -u - -lint: - @ go get -v github.com/golang/lint/golint - $(foreach file,$(SRCS),golint $(file) || exit;) - -vet: - @-go get -v golang.org/x/tools/cmd/vet - $(foreach pkg,$(PKGS),go vet $(pkg);) - -fmt: - gofmt -w $(SRCS) - -fmtcheck: - $(foreach file,$(SRCS),gofmt $(file) | diff -u $(file) - || exit;) - -pretest: lint vet fmtcheck - -test: pretest - $(foreach pkg,$(PKGS),go test $(pkg) || exit;) - -cov: - @ go get -v github.com/axw/gocov/gocov - @ go get golang.org/x/tools/cmd/cover - gocov test | gocov report - -clean: - $(foreach pkg,$(PKGS),go clean $(pkg) || exit;) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown deleted file mode 100644 index a124d0b45..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown +++ /dev/null @@ -1,106 +0,0 @@ -# go-dockerclient - -[![Drone](https://drone.io/github.com/fsouza/go-dockerclient/status.png)](https://drone.io/github.com/fsouza/go-dockerclient/latest) -[![Travis](https://img.shields.io/travis/fsouza/go-dockerclient.svg?style=flat-square)](https://travis-ci.org/fsouza/go-dockerclient) -[![GoDoc](https://img.shields.io/badge/api-Godoc-blue.svg?style=flat-square)](https://godoc.org/github.com/fsouza/go-dockerclient) - -This package presents a client for the Docker remote API. It also provides -support for the extensions in the [Swarm API](https://docs.docker.com/swarm/API/). - -This package also provides support for docker's network API, which is a simple -passthrough to the libnetwork remote API. Note that docker's network API is -only available in docker 1.8 and above, and only enabled in docker if -DOCKER_EXPERIMENTAL is defined during the docker build process. - -For more details, check the [remote API documentation](http://docs.docker.com/en/latest/reference/api/docker_remote_api/). - -## Vendoring - -If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored, -please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient -is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339) -for details. 
- -## Example - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "unix:///var/run/docker.sock" - client, _ := docker.NewClient(endpoint) - imgs, _ := client.ListImages(docker.ListImagesOptions{All: false}) - for _, img := range imgs { - fmt.Println("ID: ", img.ID) - fmt.Println("RepoTags: ", img.RepoTags) - fmt.Println("Created: ", img.Created) - fmt.Println("Size: ", img.Size) - fmt.Println("VirtualSize: ", img.VirtualSize) - fmt.Println("ParentId: ", img.ParentID) - } -} -``` - -## Using with TLS - -In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters. - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - endpoint := "tcp://[ip]:[port]" - path := os.Getenv("DOCKER_CERT_PATH") - ca := fmt.Sprintf("%s/ca.pem", path) - cert := fmt.Sprintf("%s/cert.pem", path) - key := fmt.Sprintf("%s/key.pem", path) - client, _ := docker.NewTLSClient(endpoint, cert, key, ca) - // use client -} -``` - -If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables -`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv. - - -```go -package main - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient" -) - -func main() { - client, _ := docker.NewClientFromEnv() - // use client -} -``` - -See the documentation for more details. - -## Developing - -All development commands can be seen in the [Makefile](Makefile). - -Commited code must pass: - -* [golint](https://github.com/golang/lint) -* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet) -* [gofmt](https://golang.org/cmd/gofmt) -* [go test](https://golang.org/cmd/go/#hdr-Test_packages) - -Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go deleted file mode 100644 index d2af8780c..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path" - "strings" -) - -// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed. -var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg") - -// AuthConfiguration represents authentication options to use in the PushImage -// method. It represents the authentication in the Docker index server. -type AuthConfiguration struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Email string `json:"email,omitempty"` - ServerAddress string `json:"serveraddress,omitempty"` -} - -// AuthConfigurations represents authentication options to use for the -// PushImage method accommodating the new X-Registry-Config header -type AuthConfigurations struct { - Configs map[string]AuthConfiguration `json:"configs"` -} - -// AuthConfigurations119 is used to serialize a set of AuthConfigurations -// for Docker API >= 1.19. 
-type AuthConfigurations119 map[string]AuthConfiguration - -// dockerConfig represents a registry authentation configuration from the -// .dockercfg file. -type dockerConfig struct { - Auth string `json:"auth"` - Email string `json:"email"` -} - -// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the -// ~/.dockercfg file. -func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) { - var r io.Reader - var err error - p := path.Join(os.Getenv("HOME"), ".docker", "config.json") - r, err = os.Open(p) - if err != nil { - p := path.Join(os.Getenv("HOME"), ".dockercfg") - r, err = os.Open(p) - if err != nil { - return nil, err - } - } - return NewAuthConfigurations(r) -} - -// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the -// same format as the .dockercfg file. -func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) { - var auth *AuthConfigurations - confs, err := parseDockerConfig(r) - if err != nil { - return nil, err - } - auth, err = authConfigs(confs) - if err != nil { - return nil, err - } - return auth, nil -} - -func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) { - buf := new(bytes.Buffer) - buf.ReadFrom(r) - byteData := buf.Bytes() - - var confsWrapper map[string]map[string]dockerConfig - if err := json.Unmarshal(byteData, &confsWrapper); err == nil { - if confs, ok := confsWrapper["auths"]; ok { - return confs, nil - } - } - - var confs map[string]dockerConfig - if err := json.Unmarshal(byteData, &confs); err != nil { - return nil, err - } - return confs, nil -} - -// authConfigs converts a dockerConfigs map to a AuthConfigurations object. -func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) { - c := &AuthConfigurations{ - Configs: make(map[string]AuthConfiguration), - } - for reg, conf := range confs { - data, err := base64.StdEncoding.DecodeString(conf.Auth) - if err != nil { - return nil, err - } - userpass := strings.Split(string(data), ":") - if len(userpass) != 2 { - return nil, ErrCannotParseDockercfg - } - c.Configs[reg] = AuthConfiguration{ - Email: conf.Email, - Username: userpass[0], - Password: userpass[1], - ServerAddress: reg, - } - } - return c, nil -} - -// AuthCheck validates the given credentials. It returns nil if successful. -// -// See https://goo.gl/m2SleN for more details. -func (c *Client) AuthCheck(conf *AuthConfiguration) error { - if conf == nil { - return fmt.Errorf("conf is nil") - } - body, statusCode, err := c.do("POST", "/auth", doOptions{data: conf}) - if err != nil { - return err - } - if statusCode > 400 { - return fmt.Errorf("auth error (%d): %s", statusCode, body) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go deleted file mode 100644 index b3d4f8fc8..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/auth_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
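
A minimal, purely illustrative sketch of how the auth helpers defined above can be combined (it assumes credentials already exist in `~/.docker/config.json` or `~/.dockercfg`, and a daemon reachable through the `DOCKER_*` environment variables; this is not part of the original package):

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	// Load registry credentials from ~/.docker/config.json or ~/.dockercfg.
	auths, err := docker.NewAuthConfigurationsFromDockerCfg()
	if err != nil {
		log.Fatal(err)
	}

	client, err := docker.NewClientFromEnv()
	if err != nil {
		log.Fatal(err)
	}

	// Validate each configured registry login against the daemon.
	for registry, conf := range auths.Configs {
		if err := client.AuthCheck(&conf); err != nil {
			fmt.Printf("%s: auth failed: %v\n", registry, err)
			continue
		}
		fmt.Printf("%s: auth OK\n", registry)
	}
}
```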
- -package docker - -import ( - "encoding/base64" - "fmt" - "net/http" - "strings" - "testing" -) - -func TestAuthLegacyConfig(t *testing.T) { - auth := base64.StdEncoding.EncodeToString([]byte("user:pass")) - read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) - ac, err := NewAuthConfigurations(read) - if err != nil { - t.Error(err) - } - c, ok := ac.Configs["docker.io"] - if !ok { - t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") - } - if got, want := c.Email, "user@example.com"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Username, "user"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Password, "pass"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.ServerAddress, "docker.io"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. Got %q`, want, got) - } -} - -func TestAuthBadConfig(t *testing.T) { - auth := base64.StdEncoding.EncodeToString([]byte("userpass")) - read := strings.NewReader(fmt.Sprintf(`{"docker.io":{"auth":"%s","email":"user@example.com"}}`, auth)) - ac, err := NewAuthConfigurations(read) - if err != ErrCannotParseDockercfg { - t.Errorf("Incorrect error returned %v\n", err) - } - if ac != nil { - t.Errorf("Invalid auth configuration returned, should be nil %v\n", ac) - } -} - -func TestAuthConfig(t *testing.T) { - auth := base64.StdEncoding.EncodeToString([]byte("user:pass")) - read := strings.NewReader(fmt.Sprintf(`{"auths":{"docker.io":{"auth":"%s","email":"user@example.com"}}}`, auth)) - ac, err := NewAuthConfigurations(read) - if err != nil { - t.Error(err) - } - c, ok := ac.Configs["docker.io"] - if !ok { - t.Error("NewAuthConfigurations: Expected Configs to contain docker.io") - } - if got, want := c.Email, "user@example.com"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Email: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Username, "user"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Username: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.Password, "pass"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].Password: wrong result. Want %q. Got %q`, want, got) - } - if got, want := c.ServerAddress, "docker.io"; got != want { - t.Errorf(`AuthConfigurations.Configs["docker.io"].ServerAddress: wrong result. Want %q. 
Got %q`, want, got) - } -} - -func TestAuthCheck(t *testing.T) { - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - if err := client.AuthCheck(nil); err == nil { - t.Fatalf("expected error on nil auth config") - } - // test good auth - if err := client.AuthCheck(&AuthConfiguration{}); err != nil { - t.Fatal(err) - } - *fakeRT = FakeRoundTripper{status: http.StatusUnauthorized} - if err := client.AuthCheck(&AuthConfiguration{}); err == nil { - t.Fatal("expected failure from unauthorized auth") - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go deleted file mode 100644 index c9640f205..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/build_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package docker - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "os" - "reflect" - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" -) - -func TestBuildImageMultipleContextsError(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - SuppressOutput: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - InputStream: &buf, - OutputStream: &buf, - ContextDir: "testing/data", - } - err := client.BuildImage(opts) - if err != ErrMultipleContexts { - t.Errorf("BuildImage: providing both InputStream and ContextDir should produce an error") - } -} - -func TestBuildImageContextDirDockerignoreParsing(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - - if err := os.Symlink("doesnotexist", "testing/data/symlink"); err != nil { - t.Errorf("error creating symlink on demand: %s", err) - } - defer func() { - if err := os.Remove("testing/data/symlink"); err != nil { - t.Errorf("error removing symlink on demand: %s", err) - } - }() - - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - SuppressOutput: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - OutputStream: &buf, - ContextDir: "testing/data", - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - reqBody := fakeRT.requests[0].Body - tmpdir, err := unpackBodyTarball(reqBody) - if err != nil { - t.Fatal(err) - } - - defer func() { - if err := os.RemoveAll(tmpdir); err != nil { - t.Fatal(err) - } - }() - - files, err := ioutil.ReadDir(tmpdir) - if err != nil { - t.Fatal(err) - } - - foundFiles := []string{} - for _, file := range files { - foundFiles = append(foundFiles, file.Name()) - } - - expectedFiles := []string{ - ".dockerignore", - "Dockerfile", - "barfile", - "ca.pem", - "cert.pem", - "key.pem", - "server.pem", - "serverkey.pem", - "symlink", - } - - if !reflect.DeepEqual(expectedFiles, foundFiles) { - t.Errorf( - "BuildImage: incorrect files sent in tarball to docker server\nexpected %+v, found %+v", - expectedFiles, foundFiles, - ) - } -} - -func TestBuildImageSendXRegistryConfig(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - SuppressOutput: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - OutputStream: &buf, - ContextDir: "testing/data", - AuthConfigs: AuthConfigurations{ - Configs: 
map[string]AuthConfiguration{ - "quay.io": { - Username: "foo", - Password: "bar", - Email: "baz", - ServerAddress: "quay.io", - }, - }, - }, - } - - encodedConfig := "eyJjb25maWdzIjp7InF1YXkuaW8iOnsidXNlcm5hbWUiOiJmb28iLCJwYXNzd29yZCI6ImJhciIsImVtYWlsIjoiYmF6Iiwic2VydmVyYWRkcmVzcyI6InF1YXkuaW8ifX19Cg==" - - if err := client.BuildImage(opts); err != nil { - t.Fatal(err) - } - - xRegistryConfig := fakeRT.requests[0].Header["X-Registry-Config"][0] - if xRegistryConfig != encodedConfig { - t.Errorf( - "BuildImage: X-Registry-Config not set currectly: expected %q, got %q", - encodedConfig, - xRegistryConfig, - ) - } -} - -func unpackBodyTarball(req io.ReadCloser) (tmpdir string, err error) { - tmpdir, err = ioutil.TempDir("", "go-dockerclient-test") - if err != nil { - return - } - err = archive.Untar(req, tmpdir, &archive.TarOptions{ - Compression: archive.Uncompressed, - NoLchown: true, - }) - return -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go deleted file mode 100644 index d133594d4..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import "fmt" - -// ChangeType is a type for constants indicating the type of change -// in a container -type ChangeType int - -const ( - // ChangeModify is the ChangeType for container modifications - ChangeModify ChangeType = iota - - // ChangeAdd is the ChangeType for additions to a container - ChangeAdd - - // ChangeDelete is the ChangeType for deletions from a container - ChangeDelete -) - -// Change represents a change in a container. -// -// See https://goo.gl/9GsTIF for more details. -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - var kind string - switch change.Kind { - case ChangeModify: - kind = "C" - case ChangeAdd: - kind = "A" - case ChangeDelete: - kind = "D" - } - return fmt.Sprintf("%s %s", kind, change.Path) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go deleted file mode 100644 index 7c2ec30f7..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/change_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "testing" -) - -func TestChangeString(t *testing.T) { - var tests = []struct { - change Change - expected string - }{ - {Change{"/etc/passwd", ChangeModify}, "C /etc/passwd"}, - {Change{"/etc/passwd", ChangeAdd}, "A /etc/passwd"}, - {Change{"/etc/passwd", ChangeDelete}, "D /etc/passwd"}, - {Change{"/etc/passwd", 33}, " /etc/passwd"}, - } - for _, tt := range tests { - if got := tt.change.String(); got != tt.expected { - t.Errorf("Change.String(): want %q. 
Got %q.", tt.expected, got) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go deleted file mode 100644 index d464b667d..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go +++ /dev/null @@ -1,847 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package docker provides a client for the Docker remote API. -// -// See https://goo.gl/G3plxW for more details on the remote API. -package docker - -import ( - "bufio" - "bytes" - "crypto/tls" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "net/http/httputil" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "strconv" - "strings" - - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy" -) - -const userAgent = "go-dockerclient" - -var ( - // ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL. - ErrInvalidEndpoint = errors.New("invalid endpoint") - - // ErrConnectionRefused is returned when the client cannot connect to the given endpoint. - ErrConnectionRefused = errors.New("cannot connect to Docker endpoint") - - apiVersion112, _ = NewAPIVersion("1.12") - - apiVersion119, _ = NewAPIVersion("1.19") -) - -// APIVersion is an internal representation of a version of the Remote API. -type APIVersion []int - -// NewAPIVersion returns an instance of APIVersion for the given string. -// -// The given string must be in the form .., where , -// and are integer numbers. -func NewAPIVersion(input string) (APIVersion, error) { - if !strings.Contains(input, ".") { - return nil, fmt.Errorf("Unable to parse version %q", input) - } - arr := strings.Split(input, ".") - ret := make(APIVersion, len(arr)) - var err error - for i, val := range arr { - ret[i], err = strconv.Atoi(val) - if err != nil { - return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, val) - } - } - return ret, nil -} - -func (version APIVersion) String() string { - var str string - for i, val := range version { - str += strconv.Itoa(val) - if i < len(version)-1 { - str += "." 
- } - } - return str -} - -// LessThan is a function for comparing APIVersion structs -func (version APIVersion) LessThan(other APIVersion) bool { - return version.compare(other) < 0 -} - -// LessThanOrEqualTo is a function for comparing APIVersion structs -func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool { - return version.compare(other) <= 0 -} - -// GreaterThan is a function for comparing APIVersion structs -func (version APIVersion) GreaterThan(other APIVersion) bool { - return version.compare(other) > 0 -} - -// GreaterThanOrEqualTo is a function for comparing APIVersion structs -func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool { - return version.compare(other) >= 0 -} - -func (version APIVersion) compare(other APIVersion) int { - for i, v := range version { - if i <= len(other)-1 { - otherVersion := other[i] - - if v < otherVersion { - return -1 - } else if v > otherVersion { - return 1 - } - } - } - if len(version) > len(other) { - return 1 - } - if len(version) < len(other) { - return -1 - } - return 0 -} - -// Client is the basic type of this package. It provides methods for -// interaction with the API. -type Client struct { - SkipServerVersionCheck bool - HTTPClient *http.Client - TLSConfig *tls.Config - Dialer *net.Dialer - - endpoint string - endpointURL *url.URL - eventMonitor *eventMonitoringState - requestedAPIVersion APIVersion - serverAPIVersion APIVersion - expectedAPIVersion APIVersion -} - -// NewClient returns a Client instance ready for communication with the given -// server endpoint. It will use the latest remote API version available in the -// server. -func NewClient(endpoint string) (*Client, error) { - client, err := NewVersionedClient(endpoint, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewTLSClient returns a Client instance ready for TLS communications with the givens -// server endpoint, key and certificates . It will use the latest remote API version -// available in the server. -func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) { - client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the givens -// server endpoint, key and certificates (passed inline to the function as opposed to being -// read from a local file). It will use the latest remote API version available in the server. -func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) { - client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewVersionedClient returns a Client instance ready for communication with -// the given server endpoint, using a specific remote API version. 
-func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) { - u, err := parseEndpoint(endpoint, false) - if err != nil { - return nil, err - } - var requestedAPIVersion APIVersion - if strings.Contains(apiVersionString, ".") { - requestedAPIVersion, err = NewAPIVersion(apiVersionString) - if err != nil { - return nil, err - } - } - return &Client{ - HTTPClient: http.DefaultClient, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - eventMonitor: new(eventMonitoringState), - requestedAPIVersion: requestedAPIVersion, - }, nil -} - -// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient. -func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { - return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString) -} - -// NewVersionedTLSClient returns a Client instance ready for TLS communications with the givens -// server endpoint, key and certificates, using a specific remote API version. -func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) { - certPEMBlock, err := ioutil.ReadFile(cert) - if err != nil { - return nil, err - } - keyPEMBlock, err := ioutil.ReadFile(key) - if err != nil { - return nil, err - } - caPEMCert, err := ioutil.ReadFile(ca) - if err != nil { - return nil, err - } - return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString) -} - -// NewClientFromEnv returns a Client instance ready for communication created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH. -// -// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. -// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. -func NewClientFromEnv() (*Client, error) { - client, err := NewVersionedClientFromEnv("") - if err != nil { - return nil, err - } - client.SkipServerVersionCheck = true - return client, nil -} - -// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from -// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH, -// and using a specific remote API version. -// -// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68. -// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7. 
-func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) { - dockerEnv, err := getDockerEnv() - if err != nil { - return nil, err - } - dockerHost := dockerEnv.dockerHost - if dockerEnv.dockerTLSVerify { - parts := strings.SplitN(dockerHost, "://", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost) - } - dockerHost = fmt.Sprintf("https://%s", parts[1]) - cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem") - key := filepath.Join(dockerEnv.dockerCertPath, "key.pem") - ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem") - return NewVersionedTLSClient(dockerHost, cert, key, ca, apiVersionString) - } - return NewVersionedClient(dockerHost, apiVersionString) -} - -// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the givens -// server endpoint, key and certificates (passed inline to the function as opposed to being -// read from a local file), using a specific remote API version. -func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) { - u, err := parseEndpoint(endpoint, true) - if err != nil { - return nil, err - } - var requestedAPIVersion APIVersion - if strings.Contains(apiVersionString, ".") { - requestedAPIVersion, err = NewAPIVersion(apiVersionString) - if err != nil { - return nil, err - } - } - if certPEMBlock == nil || keyPEMBlock == nil { - return nil, errors.New("Both cert and key are required") - } - tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) - if err != nil { - return nil, err - } - tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}} - if caPEMCert == nil { - tlsConfig.InsecureSkipVerify = true - } else { - caPool := x509.NewCertPool() - if !caPool.AppendCertsFromPEM(caPEMCert) { - return nil, errors.New("Could not add RootCA pem") - } - tlsConfig.RootCAs = caPool - } - tr := &http.Transport{ - TLSClientConfig: tlsConfig, - } - if err != nil { - return nil, err - } - return &Client{ - HTTPClient: &http.Client{Transport: tr}, - TLSConfig: tlsConfig, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - eventMonitor: new(eventMonitoringState), - requestedAPIVersion: requestedAPIVersion, - }, nil -} - -func (c *Client) checkAPIVersion() error { - serverAPIVersionString, err := c.getServerAPIVersionString() - if err != nil { - return err - } - c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString) - if err != nil { - return err - } - if c.requestedAPIVersion == nil { - c.expectedAPIVersion = c.serverAPIVersion - } else { - c.expectedAPIVersion = c.requestedAPIVersion - } - return nil -} - -// Ping pings the docker server -// -// See https://goo.gl/kQCfJj for more details. 
-func (c *Client) Ping() error { - path := "/_ping" - body, status, err := c.do("GET", path, doOptions{}) - if err != nil { - return err - } - if status != http.StatusOK { - return newError(status, body) - } - return nil -} - -func (c *Client) getServerAPIVersionString() (version string, err error) { - body, status, err := c.do("GET", "/version", doOptions{}) - if err != nil { - return "", err - } - if status != http.StatusOK { - return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", status) - } - var versionResponse map[string]interface{} - err = json.Unmarshal(body, &versionResponse) - if err != nil { - return "", err - } - if version, ok := (versionResponse["ApiVersion"]).(string); ok { - return version, nil - } - return "", nil -} - -type doOptions struct { - data interface{} - forceJSON bool -} - -func (c *Client) do(method, path string, doOptions doOptions) ([]byte, int, error) { - var params io.Reader - if doOptions.data != nil || doOptions.forceJSON { - buf, err := json.Marshal(doOptions.data) - if err != nil { - return nil, -1, err - } - params = bytes.NewBuffer(buf) - } - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return nil, -1, err - } - } - req, err := http.NewRequest(method, c.getURL(path), params) - if err != nil { - return nil, -1, err - } - req.Header.Set("User-Agent", userAgent) - if doOptions.data != nil { - req.Header.Set("Content-Type", "application/json") - } else if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - var resp *http.Response - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if protocol == "unix" { - var dial net.Conn - dial, err = c.Dialer.Dial(protocol, address) - if err != nil { - return nil, -1, err - } - defer dial.Close() - breader := bufio.NewReader(dial) - err = req.Write(dial) - if err != nil { - return nil, -1, err - } - resp, err = http.ReadResponse(breader, req) - } else { - resp, err = c.HTTPClient.Do(req) - } - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return nil, -1, ErrConnectionRefused - } - return nil, -1, err - } - defer resp.Body.Close() - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, -1, err - } - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, resp.StatusCode, newError(resp.StatusCode, body) - } - return body, resp.StatusCode, nil -} - -type streamOptions struct { - setRawTerminal bool - rawJSONStream bool - useJSONDecoder bool - headers map[string]string - in io.Reader - stdout io.Writer - stderr io.Writer - // timeout is the inital connection timeout - timeout time.Duration -} - -func (c *Client) stream(method, path string, streamOptions streamOptions) error { - if (method == "POST" || method == "PUT") && streamOptions.in == nil { - streamOptions.in = bytes.NewReader(nil) - } - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return err - } - } - req, err := http.NewRequest(method, c.getURL(path), streamOptions.in) - if err != nil { - return err - } - req.Header.Set("User-Agent", userAgent) - if method == "POST" { - req.Header.Set("Content-Type", "plain/text") - } - for key, val := range streamOptions.headers { - req.Header.Set(key, val) - } - var resp *http.Response - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if streamOptions.stdout == nil { - streamOptions.stdout = 
ioutil.Discard - } else if t, ok := streamOptions.stdout.(io.Closer); ok { - defer t.Close() - } - if streamOptions.stderr == nil { - streamOptions.stderr = ioutil.Discard - } else if t, ok := streamOptions.stderr.(io.Closer); ok { - defer t.Close() - } - if protocol == "unix" { - dial, err := c.Dialer.Dial(protocol, address) - if err != nil { - return err - } - defer dial.Close() - breader := bufio.NewReader(dial) - err = req.Write(dial) - if err != nil { - return err - } - - // ReadResponse may hang if server does not replay - if streamOptions.timeout > 0 { - dial.SetDeadline(time.Now().Add(streamOptions.timeout)) - } - - if resp, err = http.ReadResponse(breader, req); err != nil { - // Cancel timeout for future I/O operations - if streamOptions.timeout > 0 { - dial.SetDeadline(time.Time{}) - } - if strings.Contains(err.Error(), "connection refused") { - return ErrConnectionRefused - } - return err - } - } else { - if resp, err = c.HTTPClient.Do(req); err != nil { - if strings.Contains(err.Error(), "connection refused") { - return ErrConnectionRefused - } - return err - } - } - defer resp.Body.Close() - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return err - } - return newError(resp.StatusCode, body) - } - if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" { - // if we want to get raw json stream, just copy it back to output - // without decoding it - if streamOptions.rawJSONStream { - _, err = io.Copy(streamOptions.stdout, resp.Body) - return err - } - dec := json.NewDecoder(resp.Body) - for { - var m jsonMessage - if err := dec.Decode(&m); err == io.EOF { - break - } else if err != nil { - return err - } - if m.Stream != "" { - fmt.Fprint(streamOptions.stdout, m.Stream) - } else if m.Progress != "" { - fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress) - } else if m.Error != "" { - return errors.New(m.Error) - } - if m.Status != "" { - fmt.Fprintln(streamOptions.stdout, m.Status) - } - } - } else { - if streamOptions.setRawTerminal { - _, err = io.Copy(streamOptions.stdout, resp.Body) - } else { - _, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body) - } - return err - } - return nil -} - -type hijackOptions struct { - success chan struct{} - setRawTerminal bool - in io.Reader - stdout io.Writer - stderr io.Writer - data interface{} -} - -func (c *Client) hijack(method, path string, hijackOptions hijackOptions) error { - if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil { - err := c.checkAPIVersion() - if err != nil { - return err - } - } - - var params io.Reader - if hijackOptions.data != nil { - buf, err := json.Marshal(hijackOptions.data) - if err != nil { - return err - } - params = bytes.NewBuffer(buf) - } - - if hijackOptions.stdout == nil { - hijackOptions.stdout = ioutil.Discard - } - if hijackOptions.stderr == nil { - hijackOptions.stderr = ioutil.Discard - } - req, err := http.NewRequest(method, c.getURL(path), params) - if err != nil { - return err - } - req.Header.Set("Content-Type", "plain/text") - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if protocol != "unix" { - protocol = "tcp" - address = c.endpointURL.Host - } - var dial net.Conn - if c.TLSConfig != nil && protocol != "unix" { - dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig) - if err != nil { - return err - } - } 
else { - dial, err = c.Dialer.Dial(protocol, address) - if err != nil { - return err - } - } - clientconn := httputil.NewClientConn(dial, nil) - defer clientconn.Close() - clientconn.Do(req) - if hijackOptions.success != nil { - hijackOptions.success <- struct{}{} - <-hijackOptions.success - } - rwc, br := clientconn.Hijack() - defer rwc.Close() - errChanOut := make(chan error, 1) - errChanIn := make(chan error, 1) - go func() { - defer func() { - if hijackOptions.in != nil { - if closer, ok := hijackOptions.in.(io.Closer); ok { - closer.Close() - } - } - }() - var err error - if hijackOptions.setRawTerminal { - _, err = io.Copy(hijackOptions.stdout, br) - } else { - _, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br) - } - errChanOut <- err - }() - go func() { - var err error - if hijackOptions.in != nil { - _, err = io.Copy(rwc, hijackOptions.in) - } - errChanIn <- err - rwc.(interface { - CloseWrite() error - }).CloseWrite() - }() - errIn := <-errChanIn - errOut := <-errChanOut - if errIn != nil { - return errIn - } - return errOut -} - -func (c *Client) getURL(path string) string { - urlStr := strings.TrimRight(c.endpointURL.String(), "/") - if c.endpointURL.Scheme == "unix" { - urlStr = "" - } - - if c.requestedAPIVersion != nil { - return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path) - } - return fmt.Sprintf("%s%s", urlStr, path) -} - -type jsonMessage struct { - Status string `json:"status,omitempty"` - Progress string `json:"progress,omitempty"` - Error string `json:"error,omitempty"` - Stream string `json:"stream,omitempty"` -} - -func queryString(opts interface{}) string { - if opts == nil { - return "" - } - value := reflect.ValueOf(opts) - if value.Kind() == reflect.Ptr { - value = value.Elem() - } - if value.Kind() != reflect.Struct { - return "" - } - items := url.Values(map[string][]string{}) - for i := 0; i < value.NumField(); i++ { - field := value.Type().Field(i) - if field.PkgPath != "" { - continue - } - key := field.Tag.Get("qs") - if key == "" { - key = strings.ToLower(field.Name) - } else if key == "-" { - continue - } - addQueryStringValue(items, key, value.Field(i)) - } - return items.Encode() -} - -func addQueryStringValue(items url.Values, key string, v reflect.Value) { - switch v.Kind() { - case reflect.Bool: - if v.Bool() { - items.Add(key, "1") - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if v.Int() > 0 { - items.Add(key, strconv.FormatInt(v.Int(), 10)) - } - case reflect.Float32, reflect.Float64: - if v.Float() > 0 { - items.Add(key, strconv.FormatFloat(v.Float(), 'f', -1, 64)) - } - case reflect.String: - if v.String() != "" { - items.Add(key, v.String()) - } - case reflect.Ptr: - if !v.IsNil() { - if b, err := json.Marshal(v.Interface()); err == nil { - items.Add(key, string(b)) - } - } - case reflect.Map: - if len(v.MapKeys()) > 0 { - if b, err := json.Marshal(v.Interface()); err == nil { - items.Add(key, string(b)) - } - } - case reflect.Array, reflect.Slice: - vLen := v.Len() - if vLen > 0 { - for i := 0; i < vLen; i++ { - addQueryStringValue(items, key, v.Index(i)) - } - } - } -} - -// Error represents failures in the API. It represents a failure from the API. 
-type Error struct { - Status int - Message string -} - -func newError(status int, body []byte) *Error { - return &Error{Status: status, Message: string(body)} -} - -func (e *Error) Error() string { - return fmt.Sprintf("API error (%d): %s", e.Status, e.Message) -} - -func parseEndpoint(endpoint string, tls bool) (*url.URL, error) { - u, err := url.Parse(endpoint) - if err != nil { - return nil, ErrInvalidEndpoint - } - if tls { - u.Scheme = "https" - } - switch u.Scheme { - case "unix": - return u, nil - case "http", "https", "tcp": - _, port, err := net.SplitHostPort(u.Host) - if err != nil { - if e, ok := err.(*net.AddrError); ok { - if e.Err == "missing port in address" { - return u, nil - } - } - return nil, ErrInvalidEndpoint - } - number, err := strconv.ParseInt(port, 10, 64) - if err == nil && number > 0 && number < 65536 { - if u.Scheme == "tcp" { - if number == 2376 { - u.Scheme = "https" - } else { - u.Scheme = "http" - } - } - return u, nil - } - return nil, ErrInvalidEndpoint - default: - return nil, ErrInvalidEndpoint - } -} - -type dockerEnv struct { - dockerHost string - dockerTLSVerify bool - dockerCertPath string -} - -func getDockerEnv() (*dockerEnv, error) { - dockerHost := os.Getenv("DOCKER_HOST") - var err error - if dockerHost == "" { - dockerHost, err = getDefaultDockerHost() - if err != nil { - return nil, err - } - } - dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != "" - var dockerCertPath string - if dockerTLSVerify { - dockerCertPath = os.Getenv("DOCKER_CERT_PATH") - if dockerCertPath == "" { - home := homedir.Get() - if home == "" { - return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set") - } - dockerCertPath = filepath.Join(home, ".docker") - dockerCertPath, err = filepath.Abs(dockerCertPath) - if err != nil { - return nil, err - } - } - } - return &dockerEnv{ - dockerHost: dockerHost, - dockerTLSVerify: dockerTLSVerify, - dockerCertPath: dockerCertPath, - }, nil -} - -func getDefaultDockerHost() (string, error) { - var defaultHost string - if runtime.GOOS != "windows" { - // If we do not have a host, default to unix socket - defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket) - } else { - // If we do not have a host, default to TCP socket on Windows - defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort) - } - return opts.ValidateHost(defaultHost) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go deleted file mode 100644 index 0cc8d2285..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client_test.go +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "reflect" - "strconv" - "strings" - "testing" - "time" -) - -func TestNewAPIClient(t *testing.T) { - endpoint := "http://localhost:4243" - client, err := NewClient(endpoint) - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if client.HTTPClient != http.DefaultClient { - t.Errorf("Expected http.Client %#v. 
Got %#v.", http.DefaultClient, client.HTTPClient) - } - // test unix socket endpoints - endpoint = "unix:///var/run/docker.sock" - client, err = NewClient(endpoint) - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if !client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be true, got false") - } - if client.requestedAPIVersion != nil { - t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion) - } -} - -func newTLSClient(endpoint string) (*Client, error) { - return NewTLSClient(endpoint, - "testing/data/cert.pem", - "testing/data/key.pem", - "testing/data/ca.pem") -} - -func TestNewTSLAPIClient(t *testing.T) { - endpoint := "https://localhost:4243" - client, err := newTLSClient(endpoint) - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if !client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be true, got false") - } - if client.requestedAPIVersion != nil { - t.Errorf("Expected requestedAPIVersion to be nil, got %#v.", client.requestedAPIVersion) - } -} - -func TestNewVersionedClient(t *testing.T) { - endpoint := "http://localhost:4243" - client, err := NewVersionedClient(endpoint, "1.12") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if client.HTTPClient != http.DefaultClient { - t.Errorf("Expected http.Client %#v. Got %#v.", http.DefaultClient, client.HTTPClient) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.12" { - t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.12", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewTLSVersionedClient(t *testing.T) { - certPath := "testing/data/cert.pem" - keyPath := "testing/data/key.pem" - caPath := "testing/data/ca.pem" - endpoint := "https://localhost:4243" - client, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") - if err != nil { - t.Fatal(err) - } - if client.endpoint != endpoint { - t.Errorf("Expected endpoint %s. Got %s.", endpoint, client.endpoint) - } - if reqVersion := client.requestedAPIVersion.String(); reqVersion != "1.14" { - t.Errorf("Wrong requestAPIVersion. Want %q. Got %q.", "1.14", reqVersion) - } - if client.SkipServerVersionCheck { - t.Error("Expected SkipServerVersionCheck to be false, got true") - } -} - -func TestNewTLSVersionedClientInvalidCA(t *testing.T) { - certPath := "testing/data/cert.pem" - keyPath := "testing/data/key.pem" - caPath := "testing/data/key.pem" - endpoint := "https://localhost:4243" - _, err := NewVersionedTLSClient(endpoint, certPath, keyPath, caPath, "1.14") - if err == nil { - t.Errorf("Expected invalid ca at %s", caPath) - } -} - -func TestNewClientInvalidEndpoint(t *testing.T) { - cases := []string{ - "htp://localhost:3243", "http://localhost:a", "localhost:8080", - "", "localhost", "http://localhost:8080:8383", "http://localhost:65536", - "https://localhost:-20", - } - for _, c := range cases { - client, err := NewClient(c) - if client != nil { - t.Errorf("Want client for invalid endpoint, got %#v.", client) - } - if !reflect.DeepEqual(err, ErrInvalidEndpoint) { - t.Errorf("NewClient(%q): Got invalid error for invalid endpoint. Want %#v. 
Got %#v.", c, ErrInvalidEndpoint, err) - } - } -} - -func TestNewTLSClient(t *testing.T) { - var tests = []struct { - endpoint string - expected string - }{ - {"tcp://localhost:2376", "https"}, - {"tcp://localhost:2375", "https"}, - {"tcp://localhost:4000", "https"}, - {"http://localhost:4000", "https"}, - } - - for _, tt := range tests { - client, err := newTLSClient(tt.endpoint) - if err != nil { - t.Error(err) - } - got := client.endpointURL.Scheme - if got != tt.expected { - t.Errorf("endpointURL.Scheme: Got %s. Want %s.", got, tt.expected) - } - } -} - -func TestGetURL(t *testing.T) { - var tests = []struct { - endpoint string - path string - expected string - }{ - {"http://localhost:4243/", "/", "http://localhost:4243/"}, - {"http://localhost:4243", "/", "http://localhost:4243/"}, - {"http://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, - {"tcp://localhost:4243", "/containers/ps", "http://localhost:4243/containers/ps"}, - {"http://localhost:4243/////", "/", "http://localhost:4243/"}, - {"unix:///var/run/docker.socket", "/containers", "/containers"}, - } - for _, tt := range tests { - client, _ := NewClient(tt.endpoint) - client.endpoint = tt.endpoint - client.SkipServerVersionCheck = true - got := client.getURL(tt.path) - if got != tt.expected { - t.Errorf("getURL(%q): Got %s. Want %s.", tt.path, got, tt.expected) - } - } -} - -func TestError(t *testing.T) { - err := newError(400, []byte("bad parameter")) - expected := Error{Status: 400, Message: "bad parameter"} - if !reflect.DeepEqual(expected, *err) { - t.Errorf("Wrong error type. Want %#v. Got %#v.", expected, *err) - } - message := "API error (400): bad parameter" - if err.Error() != message { - t.Errorf("Wrong error message. Want %q. Got %q.", message, err.Error()) - } -} - -func TestQueryString(t *testing.T) { - v := float32(2.4) - f32QueryString := fmt.Sprintf("w=%s&x=10&y=10.35", strconv.FormatFloat(float64(v), 'f', -1, 64)) - jsonPerson := url.QueryEscape(`{"Name":"gopher","age":4}`) - var tests = []struct { - input interface{} - want string - }{ - {&ListContainersOptions{All: true}, "all=1"}, - {ListContainersOptions{All: true}, "all=1"}, - {ListContainersOptions{Before: "something"}, "before=something"}, - {ListContainersOptions{Before: "something", Since: "other"}, "before=something&since=other"}, - {ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, "filters=%7B%22status%22%3A%5B%22paused%22%2C%22running%22%5D%7D"}, - {dumb{X: 10, Y: 10.35000}, "x=10&y=10.35"}, - {dumb{W: v, X: 10, Y: 10.35000}, f32QueryString}, - {dumb{X: 10, Y: 10.35000, Z: 10}, "x=10&y=10.35&zee=10"}, - {dumb{v: 4, X: 10, Y: 10.35000}, "x=10&y=10.35"}, - {dumb{T: 10, Y: 10.35000}, "y=10.35"}, - {dumb{Person: &person{Name: "gopher", Age: 4}}, "p=" + jsonPerson}, - {nil, ""}, - {10, ""}, - {"not_a_struct", ""}, - } - for _, tt := range tests { - got := queryString(tt.input) - if got != tt.want { - t.Errorf("queryString(%v). Want %q. Got %q.", tt.input, tt.want, got) - } - } -} - -func TestNewAPIVersionFailures(t *testing.T) { - var tests = []struct { - input string - expectedError string - }{ - {"1-0", `Unable to parse version "1-0"`}, - {"1.0-beta", `Unable to parse version "1.0-beta": "0-beta" is not an integer`}, - } - for _, tt := range tests { - v, err := NewAPIVersion(tt.input) - if v != nil { - t.Errorf("Expected version, got %v.", v) - } - if err.Error() != tt.expectedError { - t.Errorf("NewAPIVersion(%q): wrong error. Want %q. 
Got %q", tt.input, tt.expectedError, err.Error()) - } - } -} - -func TestAPIVersions(t *testing.T) { - var tests = []struct { - a string - b string - expectedALessThanB bool - expectedALessThanOrEqualToB bool - expectedAGreaterThanB bool - expectedAGreaterThanOrEqualToB bool - }{ - {"1.11", "1.11", false, true, false, true}, - {"1.10", "1.11", true, true, false, false}, - {"1.11", "1.10", false, false, true, true}, - - {"1.9", "1.11", true, true, false, false}, - {"1.11", "1.9", false, false, true, true}, - - {"1.1.1", "1.1", false, false, true, true}, - {"1.1", "1.1.1", true, true, false, false}, - - {"2.1", "1.1.1", false, false, true, true}, - {"2.1", "1.3.1", false, false, true, true}, - {"1.1.1", "2.1", true, true, false, false}, - {"1.3.1", "2.1", true, true, false, false}, - } - - for _, tt := range tests { - a, _ := NewAPIVersion(tt.a) - b, _ := NewAPIVersion(tt.b) - - if tt.expectedALessThanB && !a.LessThan(b) { - t.Errorf("Expected %#v < %#v", a, b) - } - if tt.expectedALessThanOrEqualToB && !a.LessThanOrEqualTo(b) { - t.Errorf("Expected %#v <= %#v", a, b) - } - if tt.expectedAGreaterThanB && !a.GreaterThan(b) { - t.Errorf("Expected %#v > %#v", a, b) - } - if tt.expectedAGreaterThanOrEqualToB && !a.GreaterThanOrEqualTo(b) { - t.Errorf("Expected %#v >= %#v", a, b) - } - } -} - -func TestPing(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - err := client.Ping() - if err != nil { - t.Fatal(err) - } -} - -func TestPingFailing(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusInternalServerError} - client := newTestClient(fakeRT) - err := client.Ping() - if err == nil { - t.Fatal("Expected non nil error, got nil") - } - expectedErrMsg := "API error (500): " - if err.Error() != expectedErrMsg { - t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) - } -} - -func TestPingFailingWrongStatus(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusAccepted} - client := newTestClient(fakeRT) - err := client.Ping() - if err == nil { - t.Fatal("Expected non nil error, got nil") - } - expectedErrMsg := "API error (202): " - if err.Error() != expectedErrMsg { - t.Fatalf("Expected error to be %q, got: %q", expectedErrMsg, err.Error()) - } -} - -func TestPingErrorWithUnixSocket(t *testing.T) { - go func() { - li, err := net.Listen("unix", "/tmp/echo.sock") - if err != nil { - t.Fatal(err) - } - defer li.Close() - if err != nil { - t.Fatalf("Expected to get listner, but failed: %#v", err) - } - - fd, err := li.Accept() - if err != nil { - t.Fatalf("Expected to accept connection, but failed: %#v", err) - } - - buf := make([]byte, 512) - nr, err := fd.Read(buf) - - // Create invalid response message to occur error - data := buf[0:nr] - for i := 0; i < 10; i++ { - data[i] = 63 - } - - _, err = fd.Write(data) - if err != nil { - t.Fatalf("Expected to write to socket, but failed: %#v", err) - } - - return - }() - - // Wait for unix socket to listen - time.Sleep(10 * time.Millisecond) - - endpoint := "unix:///tmp/echo.sock" - u, _ := parseEndpoint(endpoint, false) - client := Client{ - HTTPClient: http.DefaultClient, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - SkipServerVersionCheck: true, - } - - err := client.Ping() - if err == nil { - t.Fatal("Expected non nil error, got nil") - } -} - -type FakeRoundTripper struct { - message string - status int - header map[string]string - requests []*http.Request -} - -func (rt *FakeRoundTripper) 
RoundTrip(r *http.Request) (*http.Response, error) { - body := strings.NewReader(rt.message) - rt.requests = append(rt.requests, r) - res := &http.Response{ - StatusCode: rt.status, - Body: ioutil.NopCloser(body), - Header: make(http.Header), - } - for k, v := range rt.header { - res.Header.Set(k, v) - } - return res, nil -} - -func (rt *FakeRoundTripper) Reset() { - rt.requests = nil -} - -type person struct { - Name string - Age int `json:"age"` -} - -type dumb struct { - T int `qs:"-"` - v int - W float32 - X int - Y float64 - Z int `qs:"zee"` - Person *person `qs:"p"` -} - -type fakeEndpointURL struct { - Scheme string -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go deleted file mode 100644 index 5f5deaa19..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go +++ /dev/null @@ -1,1122 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" -) - -// ErrContainerAlreadyExists is the error returned by CreateContainer when the -// container already exists. -var ErrContainerAlreadyExists = errors.New("container already exists") - -// ListContainersOptions specify parameters to the ListContainers function. -// -// See https://goo.gl/47a6tO for more details. -type ListContainersOptions struct { - All bool - Size bool - Limit int - Since string - Before string - Filters map[string][]string -} - -// APIPort is a type that represents a port mapping returned by the Docker API -type APIPort struct { - PrivatePort int64 `json:"PrivatePort,omitempty" yaml:"PrivatePort,omitempty"` - PublicPort int64 `json:"PublicPort,omitempty" yaml:"PublicPort,omitempty"` - Type string `json:"Type,omitempty" yaml:"Type,omitempty"` - IP string `json:"IP,omitempty" yaml:"IP,omitempty"` -} - -// APIContainers represents each container in the list returned by -// ListContainers. -type APIContainers struct { - ID string `json:"Id" yaml:"Id"` - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - Command string `json:"Command,omitempty" yaml:"Command,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - Status string `json:"Status,omitempty" yaml:"Status,omitempty"` - Ports []APIPort `json:"Ports,omitempty" yaml:"Ports,omitempty"` - SizeRw int64 `json:"SizeRw,omitempty" yaml:"SizeRw,omitempty"` - SizeRootFs int64 `json:"SizeRootFs,omitempty" yaml:"SizeRootFs,omitempty"` - Names []string `json:"Names,omitempty" yaml:"Names,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels, omitempty"` -} - -// ListContainers returns a slice of containers matching the given criteria. -// -// See https://goo.gl/47a6tO for more details. -func (c *Client) ListContainers(opts ListContainersOptions) ([]APIContainers, error) { - path := "/containers/json?" + queryString(opts) - body, _, err := c.do("GET", path, doOptions{}) - if err != nil { - return nil, err - } - var containers []APIContainers - err = json.Unmarshal(body, &containers) - if err != nil { - return nil, err - } - return containers, nil -} - -// Port represents the port number and the protocol, in the form -// <number>/<protocol>. For example: 80/tcp. -type Port string - -// Port returns the number of the port.
-func (p Port) Port() string { - return strings.Split(string(p), "/")[0] -} - -// Proto returns the name of the protocol. -func (p Port) Proto() string { - parts := strings.Split(string(p), "/") - if len(parts) == 1 { - return "tcp" - } - return parts[1] -} - -// State represents the state of a container. -type State struct { - Running bool `json:"Running,omitempty" yaml:"Running,omitempty"` - Paused bool `json:"Paused,omitempty" yaml:"Paused,omitempty"` - Restarting bool `json:"Restarting,omitempty" yaml:"Restarting,omitempty"` - OOMKilled bool `json:"OOMKilled,omitempty" yaml:"OOMKilled,omitempty"` - Pid int `json:"Pid,omitempty" yaml:"Pid,omitempty"` - ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"` - Error string `json:"Error,omitempty" yaml:"Error,omitempty"` - StartedAt time.Time `json:"StartedAt,omitempty" yaml:"StartedAt,omitempty"` - FinishedAt time.Time `json:"FinishedAt,omitempty" yaml:"FinishedAt,omitempty"` -} - -// String returns the string representation of a state. -func (s *State) String() string { - if s.Running { - if s.Paused { - return "paused" - } - return fmt.Sprintf("Up %s", time.Now().UTC().Sub(s.StartedAt)) - } - return fmt.Sprintf("Exit %d", s.ExitCode) -} - -// PortBinding represents the host/container port mapping as returned in the -// `docker inspect` json -type PortBinding struct { - HostIP string `json:"HostIP,omitempty" yaml:"HostIP,omitempty"` - HostPort string `json:"HostPort,omitempty" yaml:"HostPort,omitempty"` -} - -// PortMapping represents a deprecated field in the `docker inspect` output, -// and its value as found in NetworkSettings should always be nil -type PortMapping map[string]string - -// NetworkSettings contains network-related information about a container -type NetworkSettings struct { - IPAddress string `json:"IPAddress,omitempty" yaml:"IPAddress,omitempty"` - IPPrefixLen int `json:"IPPrefixLen,omitempty" yaml:"IPPrefixLen,omitempty"` - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` - Gateway string `json:"Gateway,omitempty" yaml:"Gateway,omitempty"` - Bridge string `json:"Bridge,omitempty" yaml:"Bridge,omitempty"` - PortMapping map[string]PortMapping `json:"PortMapping,omitempty" yaml:"PortMapping,omitempty"` - Ports map[Port][]PortBinding `json:"Ports,omitempty" yaml:"Ports,omitempty"` - NetworkID string `json:"NetworkID,omitempty" yaml:"NetworkID,omitempty"` - EndpointID string `json:"EndpointID,omitempty" yaml:"EndpointID,omitempty"` - SandboxKey string `json:"SandboxKey,omitempty" yaml:"SandboxKey,omitempty"` - GlobalIPv6Address string `json:"GlobalIPv6Address,omitempty" yaml:"GlobalIPv6Address,omitempty"` - GlobalIPv6PrefixLen int `json:"GlobalIPv6PrefixLen,omitempty" yaml:"GlobalIPv6PrefixLen,omitempty"` - IPv6Gateway string `json:"IPv6Gateway,omitempty" yaml:"IPv6Gateway,omitempty"` - LinkLocalIPv6Address string `json:"LinkLocalIPv6Address,omitempty" yaml:"LinkLocalIPv6Address,omitempty"` - LinkLocalIPv6PrefixLen int `json:"LinkLocalIPv6PrefixLen,omitempty" yaml:"LinkLocalIPv6PrefixLen,omitempty"` - SecondaryIPAddresses []string `json:"SecondaryIPAddresses,omitempty" yaml:"SecondaryIPAddresses,omitempty"` - SecondaryIPv6Addresses []string `json:"SecondaryIPv6Addresses,omitempty" yaml:"SecondaryIPv6Addresses,omitempty"` -} - -// PortMappingAPI translates the port mappings as contained in NetworkSettings -// into the format in which they would appear when returned by the API -func (settings *NetworkSettings) PortMappingAPI() []APIPort { - var mapping []APIPort - for port, bindings := 
range settings.Ports { - p, _ := parsePort(port.Port()) - if len(bindings) == 0 { - mapping = append(mapping, APIPort{ - PublicPort: int64(p), - Type: port.Proto(), - }) - continue - } - for _, binding := range bindings { - p, _ := parsePort(port.Port()) - h, _ := parsePort(binding.HostPort) - mapping = append(mapping, APIPort{ - PrivatePort: int64(p), - PublicPort: int64(h), - Type: port.Proto(), - IP: binding.HostIP, - }) - } - } - return mapping -} - -func parsePort(rawPort string) (int, error) { - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// Config is the list of configuration options used when creating a container. -// Config does not contain the options that are specific to starting a container on a -// given host. Those are contained in HostConfig -type Config struct { - Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"` - Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"` - User string `json:"User,omitempty" yaml:"User,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` - CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` - AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` - PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"` - ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"` - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` - StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"` - Env []string `json:"Env,omitempty" yaml:"Env,omitempty"` - Cmd []string `json:"Cmd" yaml:"Cmd"` - DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` - VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` - VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"` - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` - Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"` - NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"` - SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"` - OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"` - Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// Mount represents a mount point in the container. -// -// It has been added in the version 1.20 of the Docker API, available since -// Docker 1.8. -type Mount struct { - Source string - Destination string - Mode string - RW bool -} - -// LogConfig defines the log driver type and the configuration for it. 
-type LogConfig struct { - Type string `json:"Type,omitempty" yaml:"Type,omitempty"` - Config map[string]string `json:"Config,omitempty" yaml:"Config,omitempty"` -} - -// ULimit defines system-wide resource limitations -// This can help a lot in system administration, e.g. when a user starts too many processes and therefore makes the system unresponsive for other users. -type ULimit struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - Soft int64 `json:"Soft,omitempty" yaml:"Soft,omitempty"` - Hard int64 `json:"Hard,omitempty" yaml:"Hard,omitempty"` -} - -// SwarmNode containers information about which Swarm node the container is on -type SwarmNode struct { - ID string `json:"ID,omitempty" yaml:"ID,omitempty"` - IP string `json:"IP,omitempty" yaml:"IP,omitempty"` - Addr string `json:"Addr,omitempty" yaml:"Addr,omitempty"` - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - CPUs int64 `json:"CPUs,omitempty" yaml:"CPUs,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// Container is the type encompasing everything about a container - its config, -// hostconfig, etc. -type Container struct { - ID string `json:"Id" yaml:"Id"` - - Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"` - - Path string `json:"Path,omitempty" yaml:"Path,omitempty"` - Args []string `json:"Args,omitempty" yaml:"Args,omitempty"` - - Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"` - State State `json:"State,omitempty" yaml:"State,omitempty"` - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - - Node *SwarmNode `json:"Node,omitempty" yaml:"Node,omitempty"` - - NetworkSettings *NetworkSettings `json:"NetworkSettings,omitempty" yaml:"NetworkSettings,omitempty"` - - SysInitPath string `json:"SysInitPath,omitempty" yaml:"SysInitPath,omitempty"` - ResolvConfPath string `json:"ResolvConfPath,omitempty" yaml:"ResolvConfPath,omitempty"` - HostnamePath string `json:"HostnamePath,omitempty" yaml:"HostnamePath,omitempty"` - HostsPath string `json:"HostsPath,omitempty" yaml:"HostsPath,omitempty"` - LogPath string `json:"LogPath,omitempty" yaml:"LogPath,omitempty"` - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"` - Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` - - Volumes map[string]string `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` - VolumesRW map[string]bool `json:"VolumesRW,omitempty" yaml:"VolumesRW,omitempty"` - HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"` - ExecIDs []string `json:"ExecIDs,omitempty" yaml:"ExecIDs,omitempty"` - - RestartCount int `json:"RestartCount,omitempty" yaml:"RestartCount,omitempty"` - - AppArmorProfile string `json:"AppArmorProfile,omitempty" yaml:"AppArmorProfile,omitempty"` -} - -// RenameContainerOptions specify parameters to the RenameContainer function. -// -// See https://goo.gl/laSOIy for more details. -type RenameContainerOptions struct { - // ID of container to rename - ID string `qs:"-"` - - // New name - Name string `json:"name,omitempty" yaml:"name,omitempty"` -} - -// RenameContainer updates and existing containers name -// -// See https://goo.gl/laSOIy for more details. 
-func (c *Client) RenameContainer(opts RenameContainerOptions) error { - _, _, err := c.do("POST", fmt.Sprintf("/containers/"+opts.ID+"/rename?%s", queryString(opts)), doOptions{}) - return err -} - -// InspectContainer returns information about a container by its ID. -// -// See https://goo.gl/RdIq0b for more details. -func (c *Client) InspectContainer(id string) (*Container, error) { - path := "/containers/" + id + "/json" - body, status, err := c.do("GET", path, doOptions{}) - if status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: id} - } - if err != nil { - return nil, err - } - var container Container - err = json.Unmarshal(body, &container) - if err != nil { - return nil, err - } - return &container, nil -} - -// ContainerChanges returns changes in the filesystem of the given container. -// -// See https://goo.gl/9GsTIF for more details. -func (c *Client) ContainerChanges(id string) ([]Change, error) { - path := "/containers/" + id + "/changes" - body, status, err := c.do("GET", path, doOptions{}) - if status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: id} - } - if err != nil { - return nil, err - } - var changes []Change - err = json.Unmarshal(body, &changes) - if err != nil { - return nil, err - } - return changes, nil -} - -// CreateContainerOptions specify parameters to the CreateContainer function. -// -// See https://goo.gl/WxQzrr for more details. -type CreateContainerOptions struct { - Name string - Config *Config `qs:"-"` - HostConfig *HostConfig `qs:"-"` -} - -// CreateContainer creates a new container, returning the container instance, -// or an error in case of failure. -// -// See https://goo.gl/WxQzrr for more details. -func (c *Client) CreateContainer(opts CreateContainerOptions) (*Container, error) { - path := "/containers/create?" + queryString(opts) - body, status, err := c.do( - "POST", - path, - doOptions{ - data: struct { - *Config - HostConfig *HostConfig `json:"HostConfig,omitempty" yaml:"HostConfig,omitempty"` - }{ - opts.Config, - opts.HostConfig, - }, - }, - ) - - if status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - if status == http.StatusConflict { - return nil, ErrContainerAlreadyExists - } - if err != nil { - return nil, err - } - var container Container - err = json.Unmarshal(body, &container) - if err != nil { - return nil, err - } - - container.Name = opts.Name - - return &container, nil -} - -// KeyValuePair is a type for generic key/value pairs as used in the Lxc -// configuration -type KeyValuePair struct { - Key string `json:"Key,omitempty" yaml:"Key,omitempty"` - Value string `json:"Value,omitempty" yaml:"Value,omitempty"` -} - -// RestartPolicy represents the policy for automatically restarting a container. -// -// Possible values are: -// -// - always: the docker daemon will always restart the container -// - on-failure: the docker daemon will restart the container on failures, at -// most MaximumRetryCount times -// - no: the docker daemon will not restart the container automatically -type RestartPolicy struct { - Name string `json:"Name,omitempty" yaml:"Name,omitempty"` - MaximumRetryCount int `json:"MaximumRetryCount,omitempty" yaml:"MaximumRetryCount,omitempty"` -} - -// AlwaysRestart returns a restart policy that tells the Docker daemon to -// always restart the container. 
-func AlwaysRestart() RestartPolicy { - return RestartPolicy{Name: "always"} -} - -// RestartOnFailure returns a restart policy that tells the Docker daemon to -// restart the container on failures, trying at most maxRetry times. -func RestartOnFailure(maxRetry int) RestartPolicy { - return RestartPolicy{Name: "on-failure", MaximumRetryCount: maxRetry} -} - -// NeverRestart returns a restart policy that tells the Docker daemon to never -// restart the container on failures. -func NeverRestart() RestartPolicy { - return RestartPolicy{Name: "no"} -} - -// Device represents a device mapping between the Docker host and the -// container. -type Device struct { - PathOnHost string `json:"PathOnHost,omitempty" yaml:"PathOnHost,omitempty"` - PathInContainer string `json:"PathInContainer,omitempty" yaml:"PathInContainer,omitempty"` - CgroupPermissions string `json:"CgroupPermissions,omitempty" yaml:"CgroupPermissions,omitempty"` -} - -// HostConfig contains the container options related to starting a container on -// a given host -type HostConfig struct { - Binds []string `json:"Binds,omitempty" yaml:"Binds,omitempty"` - CapAdd []string `json:"CapAdd,omitempty" yaml:"CapAdd,omitempty"` - CapDrop []string `json:"CapDrop,omitempty" yaml:"CapDrop,omitempty"` - ContainerIDFile string `json:"ContainerIDFile,omitempty" yaml:"ContainerIDFile,omitempty"` - LxcConf []KeyValuePair `json:"LxcConf,omitempty" yaml:"LxcConf,omitempty"` - Privileged bool `json:"Privileged,omitempty" yaml:"Privileged,omitempty"` - PortBindings map[Port][]PortBinding `json:"PortBindings,omitempty" yaml:"PortBindings,omitempty"` - Links []string `json:"Links,omitempty" yaml:"Links,omitempty"` - PublishAllPorts bool `json:"PublishAllPorts,omitempty" yaml:"PublishAllPorts,omitempty"` - DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.10 and above only - DNSSearch []string `json:"DnsSearch,omitempty" yaml:"DnsSearch,omitempty"` - ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"` - VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` - NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"` - IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"` - PidMode string `json:"PidMode,omitempty" yaml:"PidMode,omitempty"` - UTSMode string `json:"UTSMode,omitempty" yaml:"UTSMode,omitempty"` - RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"` - Devices []Device `json:"Devices,omitempty" yaml:"Devices,omitempty"` - LogConfig LogConfig `json:"LogConfig,omitempty" yaml:"LogConfig,omitempty"` - ReadonlyRootfs bool `json:"ReadonlyRootfs,omitempty" yaml:"ReadonlyRootfs,omitempty"` - SecurityOpt []string `json:"SecurityOpt,omitempty" yaml:"SecurityOpt,omitempty"` - CgroupParent string `json:"CgroupParent,omitempty" yaml:"CgroupParent,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` - MemorySwappiness int64 `json:"MemorySwappiness,omitempty" yaml:"MemorySwappiness,omitempty"` - OOMKillDisable bool `json:"OomKillDisable,omitempty" yaml:"OomKillDisable"` - CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` - CPUSetCPUs string `json:"CpusetCpus,omitempty" yaml:"CpusetCpus,omitempty"` - CPUSetMEMs string `json:"CpusetMems,omitempty" yaml:"CpusetMems,omitempty"` - CPUQuota int64 `json:"CpuQuota,omitempty" 
yaml:"CpuQuota,omitempty"` - CPUPeriod int64 `json:"CpuPeriod,omitempty" yaml:"CpuPeriod,omitempty"` - BlkioWeight int64 `json:"BlkioWeight,omitempty" yaml:"BlkioWeight"` - Ulimits []ULimit `json:"Ulimits,omitempty" yaml:"Ulimits,omitempty"` -} - -// StartContainer starts a container, returning an error in case of failure. -// -// See https://goo.gl/MrBAJv for more details. -func (c *Client) StartContainer(id string, hostConfig *HostConfig) error { - path := "/containers/" + id + "/start" - _, status, err := c.do("POST", path, doOptions{data: hostConfig, forceJSON: true}) - if status == http.StatusNotFound { - return &NoSuchContainer{ID: id, Err: err} - } - if status == http.StatusNotModified { - return &ContainerAlreadyRunning{ID: id} - } - if err != nil { - return err - } - return nil -} - -// StopContainer stops a container, killing it after the given timeout (in -// seconds). -// -// See https://goo.gl/USqsFt for more details. -func (c *Client) StopContainer(id string, timeout uint) error { - path := fmt.Sprintf("/containers/%s/stop?t=%d", id, timeout) - _, status, err := c.do("POST", path, doOptions{}) - if status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - if status == http.StatusNotModified { - return &ContainerNotRunning{ID: id} - } - if err != nil { - return err - } - return nil -} - -// RestartContainer stops a container, killing it after the given timeout (in -// seconds), during the stop process. -// -// See https://goo.gl/QzsDnz for more details. -func (c *Client) RestartContainer(id string, timeout uint) error { - path := fmt.Sprintf("/containers/%s/restart?t=%d", id, timeout) - _, status, err := c.do("POST", path, doOptions{}) - if status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - if err != nil { - return err - } - return nil -} - -// PauseContainer pauses the given container. -// -// See https://goo.gl/OF7W9X for more details. -func (c *Client) PauseContainer(id string) error { - path := fmt.Sprintf("/containers/%s/pause", id) - _, status, err := c.do("POST", path, doOptions{}) - if status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - if err != nil { - return err - } - return nil -} - -// UnpauseContainer unpauses the given container. -// -// See https://goo.gl/7dwyPA for more details. -func (c *Client) UnpauseContainer(id string) error { - path := fmt.Sprintf("/containers/%s/unpause", id) - _, status, err := c.do("POST", path, doOptions{}) - if status == http.StatusNotFound { - return &NoSuchContainer{ID: id} - } - if err != nil { - return err - } - return nil -} - -// TopResult represents the list of processes running in a container, as -// returned by /containers/<id>/top. -// -// See https://goo.gl/Rb46aY for more details. -type TopResult struct { - Titles []string - Processes [][]string -} - -// TopContainer returns processes running inside a container -// -// See https://goo.gl/Rb46aY for more details. -func (c *Client) TopContainer(id string, psArgs string) (TopResult, error) { - var args string - var result TopResult - if psArgs != "" { - args = fmt.Sprintf("?ps_args=%s", psArgs) - } - path := fmt.Sprintf("/containers/%s/top%s", id, args) - body, status, err := c.do("GET", path, doOptions{}) - if status == http.StatusNotFound { - return result, &NoSuchContainer{ID: id} - } - if err != nil { - return result, err - } - err = json.Unmarshal(body, &result) - if err != nil { - return result, err - } - return result, nil -} - -// Stats represents container statistics, returned by /containers/<id>/stats.
-// -// See https://goo.gl/GNmLHb for more details. -type Stats struct { - Read time.Time `json:"read,omitempty" yaml:"read,omitempty"` - Network struct { - RxDropped uint64 `json:"rx_dropped,omitempty" yaml:"rx_dropped,omitempty"` - RxBytes uint64 `json:"rx_bytes,omitempty" yaml:"rx_bytes,omitempty"` - RxErrors uint64 `json:"rx_errors,omitempty" yaml:"rx_errors,omitempty"` - TxPackets uint64 `json:"tx_packets,omitempty" yaml:"tx_packets,omitempty"` - TxDropped uint64 `json:"tx_dropped,omitempty" yaml:"tx_dropped,omitempty"` - RxPackets uint64 `json:"rx_packets,omitempty" yaml:"rx_packets,omitempty"` - TxErrors uint64 `json:"tx_errors,omitempty" yaml:"tx_errors,omitempty"` - TxBytes uint64 `json:"tx_bytes,omitempty" yaml:"tx_bytes,omitempty"` - } `json:"network,omitempty" yaml:"network,omitempty"` - MemoryStats struct { - Stats struct { - TotalPgmafault uint64 `json:"total_pgmafault,omitempty" yaml:"total_pgmafault,omitempty"` - Cache uint64 `json:"cache,omitempty" yaml:"cache,omitempty"` - MappedFile uint64 `json:"mapped_file,omitempty" yaml:"mapped_file,omitempty"` - TotalInactiveFile uint64 `json:"total_inactive_file,omitempty" yaml:"total_inactive_file,omitempty"` - Pgpgout uint64 `json:"pgpgout,omitempty" yaml:"pgpgout,omitempty"` - Rss uint64 `json:"rss,omitempty" yaml:"rss,omitempty"` - TotalMappedFile uint64 `json:"total_mapped_file,omitempty" yaml:"total_mapped_file,omitempty"` - Writeback uint64 `json:"writeback,omitempty" yaml:"writeback,omitempty"` - Unevictable uint64 `json:"unevictable,omitempty" yaml:"unevictable,omitempty"` - Pgpgin uint64 `json:"pgpgin,omitempty" yaml:"pgpgin,omitempty"` - TotalUnevictable uint64 `json:"total_unevictable,omitempty" yaml:"total_unevictable,omitempty"` - Pgmajfault uint64 `json:"pgmajfault,omitempty" yaml:"pgmajfault,omitempty"` - TotalRss uint64 `json:"total_rss,omitempty" yaml:"total_rss,omitempty"` - TotalRssHuge uint64 `json:"total_rss_huge,omitempty" yaml:"total_rss_huge,omitempty"` - TotalWriteback uint64 `json:"total_writeback,omitempty" yaml:"total_writeback,omitempty"` - TotalInactiveAnon uint64 `json:"total_inactive_anon,omitempty" yaml:"total_inactive_anon,omitempty"` - RssHuge uint64 `json:"rss_huge,omitempty" yaml:"rss_huge,omitempty"` - HierarchicalMemoryLimit uint64 `json:"hierarchical_memory_limit,omitempty" yaml:"hierarchical_memory_limit,omitempty"` - TotalPgfault uint64 `json:"total_pgfault,omitempty" yaml:"total_pgfault,omitempty"` - TotalActiveFile uint64 `json:"total_active_file,omitempty" yaml:"total_active_file,omitempty"` - ActiveAnon uint64 `json:"active_anon,omitempty" yaml:"active_anon,omitempty"` - TotalActiveAnon uint64 `json:"total_active_anon,omitempty" yaml:"total_active_anon,omitempty"` - TotalPgpgout uint64 `json:"total_pgpgout,omitempty" yaml:"total_pgpgout,omitempty"` - TotalCache uint64 `json:"total_cache,omitempty" yaml:"total_cache,omitempty"` - InactiveAnon uint64 `json:"inactive_anon,omitempty" yaml:"inactive_anon,omitempty"` - ActiveFile uint64 `json:"active_file,omitempty" yaml:"active_file,omitempty"` - Pgfault uint64 `json:"pgfault,omitempty" yaml:"pgfault,omitempty"` - InactiveFile uint64 `json:"inactive_file,omitempty" yaml:"inactive_file,omitempty"` - TotalPgpgin uint64 `json:"total_pgpgin,omitempty" yaml:"total_pgpgin,omitempty"` - } `json:"stats,omitempty" yaml:"stats,omitempty"` - MaxUsage uint64 `json:"max_usage,omitempty" yaml:"max_usage,omitempty"` - Usage uint64 `json:"usage,omitempty" yaml:"usage,omitempty"` - Failcnt uint64 `json:"failcnt,omitempty" yaml:"failcnt,omitempty"` - Limit 
uint64 `json:"limit,omitempty" yaml:"limit,omitempty"` - } `json:"memory_stats,omitempty" yaml:"memory_stats,omitempty"` - BlkioStats struct { - IOServiceBytesRecursive []BlkioStatsEntry `json:"io_service_bytes_recursive,omitempty" yaml:"io_service_bytes_recursive,omitempty"` - IOServicedRecursive []BlkioStatsEntry `json:"io_serviced_recursive,omitempty" yaml:"io_serviced_recursive,omitempty"` - IOQueueRecursive []BlkioStatsEntry `json:"io_queue_recursive,omitempty" yaml:"io_queue_recursive,omitempty"` - IOServiceTimeRecursive []BlkioStatsEntry `json:"io_service_time_recursive,omitempty" yaml:"io_service_time_recursive,omitempty"` - IOWaitTimeRecursive []BlkioStatsEntry `json:"io_wait_time_recursive,omitempty" yaml:"io_wait_time_recursive,omitempty"` - IOMergedRecursive []BlkioStatsEntry `json:"io_merged_recursive,omitempty" yaml:"io_merged_recursive,omitempty"` - IOTimeRecursive []BlkioStatsEntry `json:"io_time_recursive,omitempty" yaml:"io_time_recursive,omitempty"` - SectorsRecursive []BlkioStatsEntry `json:"sectors_recursive,omitempty" yaml:"sectors_recursive,omitempty"` - } `json:"blkio_stats,omitempty" yaml:"blkio_stats,omitempty"` - CPUStats CPUStats `json:"cpu_stats,omitempty" yaml:"cpu_stats,omitempty"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` -} - -// CPUStats is a stats entry for cpu stats -type CPUStats struct { - CPUUsage struct { - PercpuUsage []uint64 `json:"percpu_usage,omitempty" yaml:"percpu_usage,omitempty"` - UsageInUsermode uint64 `json:"usage_in_usermode,omitempty" yaml:"usage_in_usermode,omitempty"` - TotalUsage uint64 `json:"total_usage,omitempty" yaml:"total_usage,omitempty"` - UsageInKernelmode uint64 `json:"usage_in_kernelmode,omitempty" yaml:"usage_in_kernelmode,omitempty"` - } `json:"cpu_usage,omitempty" yaml:"cpu_usage,omitempty"` - SystemCPUUsage uint64 `json:"system_cpu_usage,omitempty" yaml:"system_cpu_usage,omitempty"` - ThrottlingData struct { - Periods uint64 `json:"periods,omitempty"` - ThrottledPeriods uint64 `json:"throttled_periods,omitempty"` - ThrottledTime uint64 `json:"throttled_time,omitempty"` - } `json:"throttling_data,omitempty" yaml:"throttling_data,omitempty"` -} - -// BlkioStatsEntry is a stats entry for blkio_stats -type BlkioStatsEntry struct { - Major uint64 `json:"major,omitempty" yaml:"major,omitempty"` - Minor uint64 `json:"minor,omitempty" yaml:"minor,omitempty"` - Op string `json:"op,omitempty" yaml:"op,omitempty"` - Value uint64 `json:"value,omitempty" yaml:"value,omitempty"` -} - -// StatsOptions specify parameters to the Stats function. -// -// See https://goo.gl/GNmLHb for more details. -type StatsOptions struct { - ID string - Stats chan<- *Stats - Stream bool - // A flag that enables stopping the stats operation - Done <-chan bool - // Initial connection timeout - Timeout time.Duration -} - -// Stats sends container statistics for the given container to the given channel. -// -// This function is blocking, similar to a streaming call for logs, and should be run -// on a separate goroutine from the caller. Note that this function will block until -// the given container is removed, not just exited. When finished, this function -// will close the given channel. Alternatively, function can be stopped by -// signaling on the Done channel. -// -// See https://goo.gl/GNmLHb for more details. 
-func (c *Client) Stats(opts StatsOptions) (retErr error) { - errC := make(chan error, 1) - readCloser, writeCloser := io.Pipe() - - defer func() { - close(opts.Stats) - - select { - case err := <-errC: - if err != nil && retErr == nil { - retErr = err - } - default: - // No errors - } - - if err := readCloser.Close(); err != nil && retErr == nil { - retErr = err - } - }() - - go func() { - err := c.stream("GET", fmt.Sprintf("/containers/%s/stats?stream=%v", opts.ID, opts.Stream), streamOptions{ - rawJSONStream: true, - useJSONDecoder: true, - stdout: writeCloser, - timeout: opts.Timeout, - }) - if err != nil { - dockerError, ok := err.(*Error) - if ok { - if dockerError.Status == http.StatusNotFound { - err = &NoSuchContainer{ID: opts.ID} - } - } - } - if closeErr := writeCloser.Close(); closeErr != nil && err == nil { - err = closeErr - } - errC <- err - close(errC) - }() - - quit := make(chan struct{}) - defer close(quit) - go func() { - // block here waiting for the signal to stop function - select { - case <-opts.Done: - readCloser.Close() - case <-quit: - return - } - }() - - decoder := json.NewDecoder(readCloser) - stats := new(Stats) - for err := decoder.Decode(stats); err != io.EOF; err = decoder.Decode(stats) { - if err != nil { - return err - } - opts.Stats <- stats - stats = new(Stats) - } - return nil -} - -// KillContainerOptions represents the set of options that can be used in a -// call to KillContainer. -// -// See https://goo.gl/hkS9i8 for more details. -type KillContainerOptions struct { - // The ID of the container. - ID string `qs:"-"` - - // The signal to send to the container. When omitted, Docker server - // will assume SIGKILL. - Signal Signal -} - -// KillContainer sends a signal to a container, returning an error in case of -// failure. -// -// See https://goo.gl/hkS9i8 for more details. -func (c *Client) KillContainer(opts KillContainerOptions) error { - path := "/containers/" + opts.ID + "/kill" + "?" + queryString(opts) - _, status, err := c.do("POST", path, doOptions{}) - if status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.ID} - } - if err != nil { - return err - } - return nil -} - -// RemoveContainerOptions encapsulates options to remove a container. -// -// See https://goo.gl/RQyX62 for more details. -type RemoveContainerOptions struct { - // The ID of the container. - ID string `qs:"-"` - - // A flag that indicates whether Docker should remove the volumes - // associated to the container. - RemoveVolumes bool `qs:"v"` - - // A flag that indicates whether Docker should remove the container - // even if it is currently running. - Force bool -} - -// RemoveContainer removes a container, returning an error in case of failure. -// -// See https://goo.gl/RQyX62 for more details. -func (c *Client) RemoveContainer(opts RemoveContainerOptions) error { - path := "/containers/" + opts.ID + "?" + queryString(opts) - _, status, err := c.do("DELETE", path, doOptions{}) - if status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.ID} - } - if err != nil { - return err - } - return nil -} - -// UploadToContainerOptions is the set of options that can be used when -// uploading an archive into a container. -// -// See https://goo.gl/Ss97HW for more details. -type UploadToContainerOptions struct { - InputStream io.Reader `json:"-" qs:"-"` - Path string `qs:"path"` - NoOverwriteDirNonDir bool `qs:"noOverwriteDirNonDir"` -} - -// UploadToContainer uploads a tar archive to be extracted to a path in the -// filesystem of the container. 
-// -// See https://goo.gl/Ss97HW for more details. -func (c *Client) UploadToContainer(id string, opts UploadToContainerOptions) error { - url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - - return c.stream("PUT", url, streamOptions{ - in: opts.InputStream, - }) -} - -// DownloadFromContainerOptions is the set of options that can be used when -// downloading resources from a container. -// -// See https://goo.gl/KnZJDX for more details. -type DownloadFromContainerOptions struct { - OutputStream io.Writer `json:"-" qs:"-"` - Path string `qs:"path"` -} - -// DownloadFromContainer downloads a tar archive of files or folders in a container. -// -// See https://goo.gl/KnZJDX for more details. -func (c *Client) DownloadFromContainer(id string, opts DownloadFromContainerOptions) error { - url := fmt.Sprintf("/containers/%s/archive?", id) + queryString(opts) - - return c.stream("GET", url, streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// CopyFromContainerOptions has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer. -// -// See https://goo.gl/R2jevW for more details. -type CopyFromContainerOptions struct { - OutputStream io.Writer `json:"-"` - Container string `json:"-"` - Resource string -} - -// CopyFromContainer has been DEPRECATED, please use DownloadFromContainerOptions along with DownloadFromContainer. -// -// See https://goo.gl/R2jevW for more details. -func (c *Client) CopyFromContainer(opts CopyFromContainerOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - url := fmt.Sprintf("/containers/%s/copy", opts.Container) - body, status, err := c.do("POST", url, doOptions{data: opts}) - if status == http.StatusNotFound { - return &NoSuchContainer{ID: opts.Container} - } - if err != nil { - return err - } - _, err = io.Copy(opts.OutputStream, bytes.NewBuffer(body)) - return err -} - -// WaitContainer blocks until the given container stops, return the exit code -// of the container status. -// -// See https://goo.gl/Gc1rge for more details. -func (c *Client) WaitContainer(id string) (int, error) { - body, status, err := c.do("POST", "/containers/"+id+"/wait", doOptions{}) - if status == http.StatusNotFound { - return 0, &NoSuchContainer{ID: id} - } - if err != nil { - return 0, err - } - var r struct{ StatusCode int } - err = json.Unmarshal(body, &r) - if err != nil { - return 0, err - } - return r.StatusCode, nil -} - -// CommitContainerOptions aggregates parameters to the CommitContainer method. -// -// See https://goo.gl/mqfoCw for more details. -type CommitContainerOptions struct { - Container string - Repository string `qs:"repo"` - Tag string - Message string `qs:"m"` - Author string - Run *Config `qs:"-"` -} - -// CommitContainer creates a new image from a container's changes. -// -// See https://goo.gl/mqfoCw for more details. -func (c *Client) CommitContainer(opts CommitContainerOptions) (*Image, error) { - path := "/commit?" + queryString(opts) - body, status, err := c.do("POST", path, doOptions{data: opts.Run}) - if status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: opts.Container} - } - if err != nil { - return nil, err - } - var image Image - err = json.Unmarshal(body, &image) - if err != nil { - return nil, err - } - return &image, nil -} - -// AttachToContainerOptions is the set of options that can be used when -// attaching to a container. -// -// See https://goo.gl/NKpkFk for more details. 
-type AttachToContainerOptions struct { - Container string `qs:"-"` - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - - // Get container logs, sending it to OutputStream. - Logs bool - - // Stream the response? - Stream bool - - // Attach to stdin, and use InputStream. - Stdin bool - - // Attach to stdout, and use OutputStream. - Stdout bool - - // Attach to stderr, and use ErrorStream. - Stderr bool - - // If set, after a successful connect, a sentinel will be sent and then the - // client will block on receive before continuing. - // - // It must be an unbuffered channel. Using a buffered channel can lead - // to unexpected behavior. - Success chan struct{} - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` -} - -// AttachToContainer attaches to a container, using the given options. -// -// See https://goo.gl/NKpkFk for more details. -func (c *Client) AttachToContainer(opts AttachToContainerOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - path := "/containers/" + opts.Container + "/attach?" + queryString(opts) - return c.hijack("POST", path, hijackOptions{ - success: opts.Success, - setRawTerminal: opts.RawTerminal, - in: opts.InputStream, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - }) -} - -// LogsOptions represents the set of options used when getting logs from a -// container. -// -// See https://goo.gl/yl8PGm for more details. -type LogsOptions struct { - Container string `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - Follow bool - Stdout bool - Stderr bool - Since int64 - Timestamps bool - Tail string - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` -} - -// Logs gets stdout and stderr logs from the specified container. -// -// See https://goo.gl/yl8PGm for more details. -func (c *Client) Logs(opts LogsOptions) error { - if opts.Container == "" { - return &NoSuchContainer{ID: opts.Container} - } - if opts.Tail == "" { - opts.Tail = "all" - } - path := "/containers/" + opts.Container + "/logs?" + queryString(opts) - return c.stream("GET", path, streamOptions{ - setRawTerminal: opts.RawTerminal, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - }) -} - -// ResizeContainerTTY resizes the terminal to the given height and width. -// -// See https://goo.gl/xERhCc for more details. -func (c *Client) ResizeContainerTTY(id string, height, width int) error { - params := make(url.Values) - params.Set("h", strconv.Itoa(height)) - params.Set("w", strconv.Itoa(width)) - _, _, err := c.do("POST", "/containers/"+id+"/resize?"+params.Encode(), doOptions{}) - return err -} - -// ExportContainerOptions is the set of parameters to the ExportContainer -// method. -// -// See https://goo.gl/dOkTyk for more details. -type ExportContainerOptions struct { - ID string - OutputStream io.Writer -} - -// ExportContainer export the contents of container id as tar archive -// and prints the exported contents to stdout. -// -// See https://goo.gl/dOkTyk for more details. -func (c *Client) ExportContainer(opts ExportContainerOptions) error { - if opts.ID == "" { - return &NoSuchContainer{ID: opts.ID} - } - url := fmt.Sprintf("/containers/%s/export", opts.ID) - return c.stream("GET", url, streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// NoSuchContainer is the error returned when a given container does not exist. 
-type NoSuchContainer struct { - ID string - Err error -} - -func (err *NoSuchContainer) Error() string { - if err.Err != nil { - return err.Err.Error() - } - return "No such container: " + err.ID -} - -// ContainerAlreadyRunning is the error returned when a given container is -// already running. -type ContainerAlreadyRunning struct { - ID string -} - -func (err *ContainerAlreadyRunning) Error() string { - return "Container already running: " + err.ID -} - -// ContainerNotRunning is the error returned when a given container is not -// running. -type ContainerNotRunning struct { - ID string -} - -func (err *ContainerNotRunning) Error() string { - return "Container not running: " + err.ID -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go deleted file mode 100644 index d42c82fa0..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container_test.go +++ /dev/null @@ -1,1981 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "net/url" - "os" - "reflect" - "regexp" - "runtime" - "strconv" - "strings" - "testing" - "time" -) - -func TestStateString(t *testing.T) { - started := time.Now().Add(-3 * time.Hour) - var tests = []struct { - input State - expected string - }{ - {State{Running: true, Paused: true}, "^paused$"}, - {State{Running: true, StartedAt: started}, "^Up 3h.*$"}, - {State{Running: false, ExitCode: 7}, "^Exit 7$"}, - } - for _, tt := range tests { - re := regexp.MustCompile(tt.expected) - if got := tt.input.String(); !re.MatchString(got) { - t.Errorf("State.String(): wrong result. Want %q. Got %q.", tt.expected, got) - } - } -} - -func TestListContainers(t *testing.T) { - jsonContainers := `[ - { - "Id": "8dfafdbc3a40", - "Image": "base:latest", - "Command": "echo 1", - "Created": 1367854155, - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "Status": "Exit 0" - }, - { - "Id": "9cd87474be90", - "Image": "base:latest", - "Command": "echo 222222", - "Created": 1367854155, - "Ports":[{"PrivatePort": 2222, "PublicPort": 3333, "Type": "tcp"}], - "Status": "Exit 0" - }, - { - "Id": "3176a2479c92", - "Image": "base:latest", - "Command": "echo 3333333333333333", - "Created": 1367854154, - "Ports":[{"PrivatePort": 2221, "PublicPort": 3331, "Type": "tcp"}], - "Status": "Exit 0" - }, - { - "Id": "4cb07b47f9fb", - "Image": "base:latest", - "Command": "echo 444444444444444444444444444444444", - "Ports":[{"PrivatePort": 2223, "PublicPort": 3332, "Type": "tcp"}], - "Created": 1367854152, - "Status": "Exit 0" - } -]` - var expected []APIContainers - err := json.Unmarshal([]byte(jsonContainers), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonContainers, status: http.StatusOK}) - containers, err := client.ListContainers(ListContainersOptions{}) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(containers, expected) { - t.Errorf("ListContainers: Expected %#v. 
Got %#v.", expected, containers) - } -} - -func TestListContainersParams(t *testing.T) { - var tests = []struct { - input ListContainersOptions - params map[string][]string - }{ - {ListContainersOptions{}, map[string][]string{}}, - {ListContainersOptions{All: true}, map[string][]string{"all": {"1"}}}, - {ListContainersOptions{All: true, Limit: 10}, map[string][]string{"all": {"1"}, "limit": {"10"}}}, - { - ListContainersOptions{All: true, Limit: 10, Since: "adf9983", Before: "abdeef"}, - map[string][]string{"all": {"1"}, "limit": {"10"}, "since": {"adf9983"}, "before": {"abdeef"}}, - }, - { - ListContainersOptions{Filters: map[string][]string{"status": {"paused", "running"}}}, - map[string][]string{"filters": {"{\"status\":[\"paused\",\"running\"]}"}}, - }, - { - ListContainersOptions{All: true, Filters: map[string][]string{"exited": {"0"}, "status": {"exited"}}}, - map[string][]string{"all": {"1"}, "filters": {"{\"exited\":[\"0\"],\"status\":[\"exited\"]}"}}, - }, - } - fakeRT := &FakeRoundTripper{message: "[]", status: http.StatusOK} - client := newTestClient(fakeRT) - u, _ := url.Parse(client.getURL("/containers/json")) - for _, tt := range tests { - if _, err := client.ListContainers(tt.input); err != nil { - t.Error(err) - } - got := map[string][]string(fakeRT.requests[0].URL.Query()) - if !reflect.DeepEqual(got, tt.params) { - t.Errorf("Expected %#v, got %#v.", tt.params, got) - } - if path := fakeRT.requests[0].URL.Path; path != u.Path { - t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) - } - if meth := fakeRT.requests[0].Method; meth != "GET" { - t.Errorf("Wrong HTTP method. Want GET. Got %s.", meth) - } - fakeRT.Reset() - } -} - -func TestListContainersFailure(t *testing.T) { - var tests = []struct { - status int - message string - }{ - {400, "bad parameter"}, - {500, "internal server error"}, - } - for _, tt := range tests { - client := newTestClient(&FakeRoundTripper{message: tt.message, status: tt.status}) - expected := Error{Status: tt.status, Message: tt.message} - containers, err := client.ListContainers(ListContainersOptions{}) - if !reflect.DeepEqual(expected, *err.(*Error)) { - t.Errorf("Wrong error in ListContainers. Want %#v. Got %#v.", expected, err) - } - if len(containers) > 0 { - t.Errorf("ListContainers failure. Expected empty list. 
Got %#v.", containers) - } - } -} - -func TestInspectContainer(t *testing.T) { - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "AppArmorProfile": "Profile", - "Created": "2013-05-07T14:51:42.087658+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 17179869184, - "MemorySwap": 34359738368, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Image": "base", - "Volumes": {}, - "VolumesFrom": "", - "SecurityOpt": [ - "label:user:USER" - ], - "Ulimits": [ - { "Name": "nofile", "Soft": 1024, "Hard": 2048 } - ] - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:00", - "Ghost": false - }, - "Node": { - "ID": "4I4E:QR4I:Z733:QEZK:5X44:Q4T7:W2DD:JRDY:KB2O:PODO:Z5SR:XRB6", - "IP": "192.168.99.105", - "Addra": "192.168.99.105:2376", - "Name": "node-01", - "Cpus": 4, - "Memory": 1048436736, - "Labels": { - "executiondriver": "native-0.2", - "kernelversion": "3.18.5-tinycore64", - "operatingsystem": "Boot2Docker 1.5.0 (TCL 5.4); master : a66bce5 - Tue Feb 10 23:31:27 UTC 2015", - "provider": "virtualbox", - "storagedriver": "aufs" - } - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false, - "CgroupParent": "/mesos", - "Memory": 17179869184, - "MemorySwap": 34359738368 - } -}` - var expected Container - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c678" - container, err := client.InspectContainer(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*container, expected) { - t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container) - } - expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestInspectContainerNegativeSwap(t *testing.T) { - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Created": "2013-05-07T14:51:42.087658+02:00", - "Path": "date", - "Args": [], - "Config": { - "Hostname": "4fa6e0f0c678", - "User": "", - "Memory": 17179869184, - "MemorySwap": -1, - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "PortSpecs": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Image": "base", - "Volumes": {}, - "VolumesFrom": "" - }, - "State": { - "Running": false, - "Pid": 0, - "ExitCode": 0, - "StartedAt": "2013-05-07T14:51:42.087658+02:00", - "Ghost": false - }, - "Image": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "NetworkSettings": { - "IpAddress": "", - "IpPrefixLen": 0, - "Gateway": "", - "Bridge": "", - "PortMapping": null - }, - "SysInitPath": "/home/kitty/go/src/github.com/dotcloud/docker/bin/docker", - "ResolvConfPath": "/etc/resolv.conf", - "Volumes": {}, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": null, - "PublishAllPorts": false - } -}` - var expected Container - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c678" - container, err := client.InspectContainer(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*container, expected) { - t.Errorf("InspectContainer(%q): Expected %#v. Got %#v.", id, expected, container) - } - expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/json")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("InspectContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestInspectContainerFailure(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "server error", status: 500}) - expected := Error{Status: 500, Message: "server error"} - container, err := client.InspectContainer("abe033") - if container != nil { - t.Errorf("InspectContainer: Expected container, got %#v", container) - } - if !reflect.DeepEqual(expected, *err.(*Error)) { - t.Errorf("InspectContainer: Wrong error information. Want %#v. Got %#v.", expected, err) - } -} - -func TestInspectContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404}) - container, err := client.InspectContainer("abe033") - if container != nil { - t.Errorf("InspectContainer: Expected container, got %#v", container) - } - expected := &NoSuchContainer{ID: "abe033"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("InspectContainer: Wrong error information. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestContainerChanges(t *testing.T) { - jsonChanges := `[ - { - "Path":"/dev", - "Kind":0 - }, - { - "Path":"/dev/kmsg", - "Kind":1 - }, - { - "Path":"/test", - "Kind":1 - } -]` - var expected []Change - err := json.Unmarshal([]byte(jsonChanges), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonChanges, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c678" - changes, err := client.ContainerChanges(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(changes, expected) { - t.Errorf("ContainerChanges(%q): Expected %#v. Got %#v.", id, expected, changes) - } - expectedURL, _ := url.Parse(client.getURL("/containers/4fa6e0f0c678/changes")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("ContainerChanges(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestContainerChangesFailure(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "server error", status: 500}) - expected := Error{Status: 500, Message: "server error"} - changes, err := client.ContainerChanges("abe033") - if changes != nil { - t.Errorf("ContainerChanges: Expected changes, got %#v", changes) - } - if !reflect.DeepEqual(expected, *err.(*Error)) { - t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err) - } -} - -func TestContainerChangesNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: 404}) - changes, err := client.ContainerChanges("abe033") - if changes != nil { - t.Errorf("ContainerChanges: Expected changes, got %#v", changes) - } - expected := &NoSuchContainer{ID: "abe033"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("ContainerChanges: Wrong error information. Want %#v. Got %#v.", expected, err) - } -} - -func TestCreateContainer(t *testing.T) { - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Warnings": [] -}` - var expected Container - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := Config{AttachStdout: true, AttachStdin: true} - opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} - container, err := client.CreateContainer(opts) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - if container.ID != id { - t.Errorf("CreateContainer: wrong ID. Want %q. Got %q.", id, container.ID) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("CreateContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/create")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("CreateContainer: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) - } - var gotBody Config - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } -} - -func TestCreateContainerImageNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusNotFound}) - config := Config{AttachStdout: true, AttachStdin: true} - container, err := client.CreateContainer(CreateContainerOptions{Config: &config}) - if container != nil { - t.Errorf("CreateContainer: expected container, got %#v.", container) - } - if !reflect.DeepEqual(err, ErrNoSuchImage) { - t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrNoSuchImage, err) - } -} - -func TestCreateContainerDuplicateName(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "No such image", status: http.StatusConflict}) - config := Config{AttachStdout: true, AttachStdin: true} - container, err := client.CreateContainer(CreateContainerOptions{Config: &config}) - if container != nil { - t.Errorf("CreateContainer: expected container, got %#v.", container) - } - if err != ErrContainerAlreadyExists { - t.Errorf("CreateContainer: Wrong error type. Want %#v. Got %#v.", ErrContainerAlreadyExists, err) - } -} - -func TestCreateContainerWithHostConfig(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK} - client := newTestClient(fakeRT) - config := Config{} - hostConfig := HostConfig{PublishAllPorts: true} - opts := CreateContainerOptions{Name: "TestCreateContainerWithHostConfig", Config: &config, HostConfig: &hostConfig} - _, err := client.CreateContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - var gotBody map[string]interface{} - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } - if _, ok := gotBody["HostConfig"]; !ok { - t.Errorf("CreateContainer: wrong body. HostConfig was not serialized") - } -} - -func TestStartContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.StartContainer(id, &HostConfig{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) - } -} - -func TestStartContainerNilHostConfig(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.StartContainer(id, nil) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StartContainer(%q): wrong HTTP method. Want %q. 
Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StartContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } - expectedContentType := "application/json" - if contentType := req.Header.Get("Content-Type"); contentType != expectedContentType { - t.Errorf("StartContainer(%q): Wrong content-type in request. Want %q. Got %q.", id, expectedContentType, contentType) - } - var buf [4]byte - req.Body.Read(buf[:]) - if string(buf[:]) != "null" { - t.Errorf("Startcontainer(%q): Wrong body. Want null. Got %s", id, buf[:]) - } -} - -func TestStartContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.StartContainer("a2344", &HostConfig{}) - expected := &NoSuchContainer{ID: "a2344", Err: err.(*NoSuchContainer).Err} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestStartContainerAlreadyRunning(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "container already running", status: http.StatusNotModified}) - err := client.StartContainer("a2334", &HostConfig{}) - expected := &ContainerAlreadyRunning{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestStopContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.StopContainer(id, 10) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("StopContainer(%q, 10): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/stop")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("StopContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestStopContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.StopContainer("a2334", 10) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestStopContainerNotRunning(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "container not running", status: http.StatusNotModified}) - err := client.StopContainer("a2334", 10) - expected := &ContainerNotRunning{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestRestartContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.RestartContainer(id, 10) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("RestartContainer(%q, 10): wrong HTTP method. Want %q. 
Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/restart")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RestartContainer(%q, 10): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestRestartContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.RestartContainer("a2334", 10) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("RestartContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestPauseContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.PauseContainer(id) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/pause")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestPauseContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.PauseContainer("a2334") - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestUnpauseContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.UnpauseContainer(id) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PauseContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/unpause")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("PauseContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestUnpauseContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.UnpauseContainer("a2334") - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("PauseContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestKillContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.KillContainer(KillContainerOptions{ID: id}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/kill")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("KillContainer(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestKillContainerSignal(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.KillContainer(KillContainerOptions{ID: id, Signal: SIGTERM}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("KillContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - if signal := req.URL.Query().Get("signal"); signal != "15" { - t.Errorf("KillContainer(%q): Wrong query string in request. Want %q. Got %q.", id, "15", signal) - } -} - -func TestKillContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.KillContainer(KillContainerOptions{ID: "a2334"}) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("KillContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestRemoveContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := RemoveContainerOptions{ID: id} - err := client.RemoveContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "DELETE" { - t.Errorf("RemoveContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "DELETE", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id)) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RemoveContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestRemoveContainerRemoveVolumes(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := RemoveContainerOptions{ID: id, RemoveVolumes: true} - err := client.RemoveContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - params := map[string][]string(req.URL.Query()) - expected := map[string][]string{"v": {"1"}} - if !reflect.DeepEqual(params, expected) { - t.Errorf("RemoveContainer(%q): wrong parameters. Want %#v. Got %#v.", id, expected, params) - } -} - -func TestRemoveContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - err := client.RemoveContainer(RemoveContainerOptions{ID: "a2334"}) - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("RemoveContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestResizeContainerTTY(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - err := client.ResizeContainerTTY(id, 40, 80) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ResizeContainerTTY(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/resize")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("ResizeContainerTTY(%q): Wrong path in request. Want %q. 
Got %q.", id, expectedURL.Path, gotPath) - } - got := map[string][]string(req.URL.Query()) - expectedParams := map[string][]string{ - "w": {"80"}, - "h": {"40"}, - } - if !reflect.DeepEqual(got, expectedParams) { - t.Errorf("Expected %#v, got %#v.", expectedParams, got) - } -} - -func TestWaitContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: `{"StatusCode": 56}`, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - status, err := client.WaitContainer(id) - if err != nil { - t.Fatal(err) - } - if status != 56 { - t.Errorf("WaitContainer(%q): wrong return. Want 56. Got %d.", id, status) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("WaitContainer(%q): wrong HTTP method. Want %q. Got %q.", id, "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/" + id + "/wait")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("WaitContainer(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestWaitContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - _, err := client.WaitContainer("a2334") - expected := &NoSuchContainer{ID: "a2334"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("WaitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestCommitContainer(t *testing.T) { - response := `{"Id":"596069db4bf5"}` - client := newTestClient(&FakeRoundTripper{message: response, status: http.StatusOK}) - id := "596069db4bf5" - image, err := client.CommitContainer(CommitContainerOptions{}) - if err != nil { - t.Fatal(err) - } - if image.ID != id { - t.Errorf("CommitContainer: Wrong image id. Want %q. Got %q.", id, image.ID) - } -} - -func TestCommitContainerParams(t *testing.T) { - cfg := Config{Memory: 67108864} - json, _ := json.Marshal(&cfg) - var tests = []struct { - input CommitContainerOptions - params map[string][]string - body []byte - }{ - {CommitContainerOptions{}, map[string][]string{}, nil}, - {CommitContainerOptions{Container: "44c004db4b17"}, map[string][]string{"container": {"44c004db4b17"}}, nil}, - { - CommitContainerOptions{Container: "44c004db4b17", Repository: "tsuru/python", Message: "something"}, - map[string][]string{"container": {"44c004db4b17"}, "repo": {"tsuru/python"}, "m": {"something"}}, - nil, - }, - { - CommitContainerOptions{Container: "44c004db4b17", Run: &cfg}, - map[string][]string{"container": {"44c004db4b17"}}, - json, - }, - } - fakeRT := &FakeRoundTripper{message: "{}", status: http.StatusOK} - client := newTestClient(fakeRT) - u, _ := url.Parse(client.getURL("/commit")) - for _, tt := range tests { - if _, err := client.CommitContainer(tt.input); err != nil { - t.Error(err) - } - got := map[string][]string(fakeRT.requests[0].URL.Query()) - if !reflect.DeepEqual(got, tt.params) { - t.Errorf("Expected %#v, got %#v.", tt.params, got) - } - if path := fakeRT.requests[0].URL.Path; path != u.Path { - t.Errorf("Wrong path on request. Want %q. Got %q.", u.Path, path) - } - if meth := fakeRT.requests[0].Method; meth != "POST" { - t.Errorf("Wrong HTTP method. Want POST. 
Got %s.", meth) - } - if tt.body != nil { - if requestBody, err := ioutil.ReadAll(fakeRT.requests[0].Body); err == nil { - if bytes.Compare(requestBody, tt.body) != 0 { - t.Errorf("Expected body %#v, got %#v", tt.body, requestBody) - } - } else { - t.Errorf("Error reading request body: %#v", err) - } - } - fakeRT.Reset() - } -} - -func TestCommitContainerFailure(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusInternalServerError}) - _, err := client.CommitContainer(CommitContainerOptions{}) - if err == nil { - t.Error("Expected non-nil error, got .") - } -} - -func TestCommitContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - _, err := client.CommitContainer(CommitContainerOptions{}) - expected := &NoSuchContainer{ID: ""} - if !reflect.DeepEqual(err, expected) { - t.Errorf("CommitContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestAttachToContainerLogs(t *testing.T) { - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 19}) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &buf, - Stdout: true, - Stderr: true, - Logs: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("AttachToContainer for logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "POST" { - t.Errorf("AttachToContainer: wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/a123456/attach")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "logs": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("AttachToContainer: wrong query string. Want %#v. Got %#v.", expectedQs, got) - } -} - -func TestAttachToContainer(t *testing.T) { - var reader = strings.NewReader("send value") - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } - expected := map[string][]string{ - "stdin": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "stream": {"1"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("AttachToContainer: wrong query string. Want %#v. 
Got %#v.", expected, got) - } -} - -func TestAttachToContainerSentinel(t *testing.T) { - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - success := make(chan struct{}) - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - Success: success, - } - go func() { - if err := client.AttachToContainer(opts); err != nil { - t.Error(err) - } - }() - success <- <-success -} - -func TestAttachToContainerNilStdout(t *testing.T) { - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stderr bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: nil, - ErrorStream: &stderr, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestAttachToContainerNilStderr(t *testing.T) { - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - InputStream: reader, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: true, - } - err := client.AttachToContainer(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestAttachToContainerRawTerminalFalse(t *testing.T) { - input := strings.NewReader("send value") - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - req = *r - w.WriteHeader(http.StatusOK) - hj, ok := w.(http.Hijacker) - if !ok { - t.Fatal("cannot hijack server connection") - } - conn, _, err := hj.Hijack() - if err != nil { - t.Fatal(err) - } - conn.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - conn.Write([]byte("hello")) - conn.Write([]byte{2, 0, 0, 0, 0, 0, 0, 6}) - conn.Write([]byte("hello!")) - conn.Close() - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - opts := AttachToContainerOptions{ - Container: "a123456", - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: input, - Stdin: true, - Stdout: true, - Stderr: true, - Stream: true, - RawTerminal: false, - } - client.AttachToContainer(opts) - expected := map[string][]string{ - "stdin": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "stream": {"1"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("AttachToContainer: wrong query string. Want %#v. 
Got %#v.", expected, got) - } - if stdout.String() != "hello" { - t.Errorf("AttachToContainer: wrong content written to stdout. Want %q. Got %q.", "hello", stdout.String()) - } - if stderr.String() != "hello!" { - t.Errorf("AttachToContainer: wrong content written to stderr. Want %q. Got %q.", "hello!", stderr.String()) - } -} - -func TestAttachToContainerWithoutContainer(t *testing.T) { - var client Client - err := client.AttachToContainer(AttachToContainerOptions{}) - expected := &NoSuchContainer{ID: ""} - if !reflect.DeepEqual(err, expected) { - t.Errorf("AttachToContainer: wrong error. Want %#v. Got %#v.", expected, err) - } -} - -func TestLogs(t *testing.T) { - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsOptions{ - Container: "a123456", - OutputStream: &buf, - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "GET" { - t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/a123456/logs")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "follow": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "timestamps": {"1"}, - "tail": {"all"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("Logs: wrong query string. Want %#v. 
Got %#v.", expectedQs, got) - } -} - -func TestLogsNilStdoutDoesntFail(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - opts := LogsOptions{ - Container: "a123456", - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestLogsNilStderrDoesntFail(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{2, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - opts := LogsOptions{ - Container: "a123456", - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } -} - -func TestLogsSpecifyingTail(t *testing.T) { - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - prefix := []byte{1, 0, 0, 0, 0, 0, 0, 19} - w.Write(prefix) - w.Write([]byte("something happened!")) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsOptions{ - Container: "a123456", - OutputStream: &buf, - Follow: true, - Stdout: true, - Stderr: true, - Timestamps: true, - Tail: "100", - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } - if req.Method != "GET" { - t.Errorf("Logs: wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/a123456/logs")) - if req.URL.Path != u.Path { - t.Errorf("AttachToContainer for logs: wrong HTTP path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQs := map[string][]string{ - "follow": {"1"}, - "stdout": {"1"}, - "stderr": {"1"}, - "timestamps": {"1"}, - "tail": {"100"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expectedQs) { - t.Errorf("Logs: wrong query string. Want %#v. Got %#v.", expectedQs, got) - } -} - -func TestLogsRawTerminal(t *testing.T) { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("something happened!")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var buf bytes.Buffer - opts := LogsOptions{ - Container: "a123456", - OutputStream: &buf, - Follow: true, - RawTerminal: true, - Stdout: true, - Stderr: true, - Timestamps: true, - Tail: "100", - } - err := client.Logs(opts) - if err != nil { - t.Fatal(err) - } - expected := "something happened!" - if buf.String() != expected { - t.Errorf("Logs: wrong output. Want %q. Got %q.", expected, buf.String()) - } -} - -func TestLogsNoContainer(t *testing.T) { - var client Client - err := client.Logs(LogsOptions{}) - expected := &NoSuchContainer{ID: ""} - if !reflect.DeepEqual(err, expected) { - t.Errorf("AttachToContainer: wrong error. Want %#v. 
Got %#v.", expected, err) - } -} - -func TestNoSuchContainerError(t *testing.T) { - var err = &NoSuchContainer{ID: "i345"} - expected := "No such container: i345" - if got := err.Error(); got != expected { - t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got) - } -} - -func TestNoSuchContainerErrorMessage(t *testing.T) { - var err = &NoSuchContainer{ID: "i345", Err: errors.New("some advanced error info")} - expected := "some advanced error info" - if got := err.Error(); got != expected { - t.Errorf("NoSuchContainer: wrong message. Want %q. Got %q.", expected, got) - } -} - -func TestExportContainer(t *testing.T) { - content := "exported container tar content" - out := stdoutMock{bytes.NewBufferString(content)} - client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) - opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} - err := client.ExportContainer(opts) - if err != nil { - t.Errorf("ExportContainer: caugh error %#v while exporting container, expected nil", err.Error()) - } - if out.String() != content { - t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) - } -} - -func TestExportContainerViaUnixSocket(t *testing.T) { - if runtime.GOOS != "darwin" { - t.Skip(fmt.Sprintf("skipping test on %s", runtime.GOOS)) - } - content := "exported container tar content" - var buf []byte - out := bytes.NewBuffer(buf) - tempSocket := tempfile("export_socket") - defer os.Remove(tempSocket) - endpoint := "unix://" + tempSocket - u, _ := parseEndpoint(endpoint, false) - client := Client{ - HTTPClient: http.DefaultClient, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - SkipServerVersionCheck: true, - } - listening := make(chan string) - done := make(chan int) - go runStreamConnServer(t, "unix", tempSocket, listening, done) - <-listening // wait for server to start - opts := ExportContainerOptions{ID: "4fa6e0f0c678", OutputStream: out} - err := client.ExportContainer(opts) - <-done // make sure server stopped - if err != nil { - t.Errorf("ExportContainer: caugh error %#v while exporting container, expected nil", err.Error()) - } - if out.String() != content { - t.Errorf("ExportContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) - } -} - -func runStreamConnServer(t *testing.T, network, laddr string, listening chan<- string, done chan<- int) { - defer close(done) - l, err := net.Listen(network, laddr) - if err != nil { - t.Errorf("Listen(%q, %q) failed: %v", network, laddr, err) - listening <- "" - return - } - defer l.Close() - listening <- l.Addr().String() - c, err := l.Accept() - if err != nil { - t.Logf("Accept failed: %v", err) - return - } - c.Write([]byte("HTTP/1.1 200 OK\n\nexported container tar content")) - c.Close() -} - -func tempfile(filename string) string { - return os.TempDir() + "/" + filename + "." + strconv.Itoa(os.Getpid()) -} - -func TestExportContainerNoId(t *testing.T) { - client := Client{} - out := stdoutMock{bytes.NewBufferString("")} - err := client.ExportContainer(ExportContainerOptions{OutputStream: out}) - e, ok := err.(*NoSuchContainer) - if !ok { - t.Errorf("ExportContainer: wrong error. Want NoSuchContainer. Got %#v.", e) - } - if e.ID != "" { - t.Errorf("ExportContainer: wrong ID. Want %q. 
Got %q", "", e.ID) - } -} - -func TestUploadToContainer(t *testing.T) { - content := "File content" - in := stdinMock{bytes.NewBufferString(content)} - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - opts := UploadToContainerOptions{ - Path: "abc", - InputStream: in, - } - err := client.UploadToContainer("a123456", opts) - if err != nil { - t.Errorf("UploadToContainer: caught error %#v while uploading archive to container, expected nil", err) - } - - req := fakeRT.requests[0] - - if req.Method != "PUT" { - t.Errorf("UploadToContainer{Path:abc}: Wrong HTTP method. Want PUT. Got %s", req.Method) - } - - if pathParam := req.URL.Query().Get("path"); pathParam != "abc" { - t.Errorf("ListImages({Path:abc}): Wrong parameter. Want path=abc. Got path=%s", pathParam) - } - -} - -func TestDownloadFromContainer(t *testing.T) { - filecontent := "File content" - client := newTestClient(&FakeRoundTripper{message: filecontent, status: http.StatusOK}) - - var out bytes.Buffer - opts := DownloadFromContainerOptions{ - OutputStream: &out, - } - err := client.DownloadFromContainer("a123456", opts) - if err != nil { - t.Errorf("DownloadFromContainer: caught error %#v while downloading from container, expected nil", err.Error()) - } - if out.String() != filecontent { - t.Errorf("DownloadFromContainer: wrong stdout. Want %#v. Got %#v.", filecontent, out.String()) - } -} - -func TestCopyFromContainer(t *testing.T) { - content := "File content" - out := stdoutMock{bytes.NewBufferString(content)} - client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) - opts := CopyFromContainerOptions{ - Container: "a123456", - OutputStream: &out, - } - err := client.CopyFromContainer(opts) - if err != nil { - t.Errorf("CopyFromContainer: caught error %#v while copying from container, expected nil", err.Error()) - } - if out.String() != content { - t.Errorf("CopyFromContainer: wrong stdout. Want %#v. Got %#v.", content, out.String()) - } -} - -func TestCopyFromContainerEmptyContainer(t *testing.T) { - client := newTestClient(&FakeRoundTripper{status: http.StatusOK}) - err := client.CopyFromContainer(CopyFromContainerOptions{}) - _, ok := err.(*NoSuchContainer) - if !ok { - t.Errorf("CopyFromContainer: invalid error returned. Want NoSuchContainer, got %#v.", err) - } -} - -func TestPassingNameOptToCreateContainerReturnsItInContainer(t *testing.T) { - jsonContainer := `{ - "Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2", - "Warnings": [] -}` - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := Config{AttachStdout: true, AttachStdin: true} - opts := CreateContainerOptions{Name: "TestCreateContainer", Config: &config} - container, err := client.CreateContainer(opts) - if err != nil { - t.Fatal(err) - } - if container.Name != "TestCreateContainer" { - t.Errorf("Container name expected to be TestCreateContainer, was %s", container.Name) - } -} - -func TestAlwaysRestart(t *testing.T) { - policy := AlwaysRestart() - if policy.Name != "always" { - t.Errorf("AlwaysRestart(): wrong policy name. Want %q. Got %q", "always", policy.Name) - } - if policy.MaximumRetryCount != 0 { - t.Errorf("AlwaysRestart(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount) - } -} - -func TestRestartOnFailure(t *testing.T) { - const retry = 5 - policy := RestartOnFailure(retry) - if policy.Name != "on-failure" { - t.Errorf("RestartOnFailure(%d): wrong policy name. Want %q. 
Got %q", retry, "on-failure", policy.Name) - } - if policy.MaximumRetryCount != retry { - t.Errorf("RestartOnFailure(%d): wrong MaximumRetryCount. Want %d. Got %d", retry, retry, policy.MaximumRetryCount) - } -} - -func TestNeverRestart(t *testing.T) { - policy := NeverRestart() - if policy.Name != "no" { - t.Errorf("NeverRestart(): wrong policy name. Want %q. Got %q", "always", policy.Name) - } - if policy.MaximumRetryCount != 0 { - t.Errorf("NeverRestart(): wrong MaximumRetryCount. Want 0. Got %d", policy.MaximumRetryCount) - } -} - -func TestTopContainer(t *testing.T) { - jsonTop := `{ - "Processes": [ - [ - "ubuntu", - "3087", - "815", - "0", - "01:44", - "?", - "00:00:00", - "cmd1" - ], - [ - "root", - "3158", - "3087", - "0", - "01:44", - "?", - "00:00:01", - "cmd2" - ] - ], - "Titles": [ - "UID", - "PID", - "PPID", - "C", - "STIME", - "TTY", - "TIME", - "CMD" - ] -}` - var expected TopResult - err := json.Unmarshal([]byte(jsonTop), &expected) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0" - fakeRT := &FakeRoundTripper{message: jsonTop, status: http.StatusOK} - client := newTestClient(fakeRT) - processes, err := client.TopContainer(id, "") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(processes, expected) { - t.Errorf("TopContainer: Expected %#v. Got %#v.", expected, processes) - } - if len(processes.Processes) != 2 || len(processes.Processes[0]) != 8 || - processes.Processes[0][7] != "cmd1" { - t.Errorf("TopContainer: Process list to include cmd1. Got %#v.", processes) - } - expectedURI := "/containers/" + id + "/top" - if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) { - t.Errorf("TopContainer: Expected URI to have %q. Got %q.", expectedURI, fakeRT.requests[0].URL.String()) - } -} - -func TestTopContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - _, err := client.TopContainer("abef348", "") - expected := &NoSuchContainer{ID: "abef348"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("StopContainer: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestTopContainerWithPsArgs(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "no such container", status: http.StatusNotFound} - client := newTestClient(fakeRT) - expectedErr := &NoSuchContainer{ID: "abef348"} - if _, err := client.TopContainer("abef348", "aux"); !reflect.DeepEqual(expectedErr, err) { - t.Errorf("TopContainer: Expected %v. Got %v.", expectedErr, err) - } - expectedURI := "/containers/abef348/top?ps_args=aux" - if !strings.HasSuffix(fakeRT.requests[0].URL.String(), expectedURI) { - t.Errorf("TopContainer: Expected URI to have %q. 
Got %q.", expectedURI, fakeRT.requests[0].URL.String()) - } -} - -func TestStatsTimeout(t *testing.T) { - l, err := net.Listen("unix", "/tmp/docker_test.sock") - if err != nil { - t.Fatal(err) - } - received := false - defer l.Close() - go func() { - l.Accept() - received = true - time.Sleep(time.Second) - }() - client, _ := NewClient("unix:///tmp/docker_test.sock") - client.SkipServerVersionCheck = true - errC := make(chan error, 1) - statsC := make(chan *Stats) - done := make(chan bool) - go func() { - errC <- client.Stats(StatsOptions{"c", statsC, true, done, time.Millisecond * 100}) - close(errC) - }() - err = <-errC - e, ok := err.(net.Error) - if !ok || !e.Timeout() { - t.Error("Failed to receive timeout exception") - } - if !received { - t.Fatal("Failed to receive message") - } -} - -func TestStats(t *testing.T) { - jsonStats1 := `{ - "read" : "2015-01-08T22:57:31.547920715Z", - "network" : { - "rx_dropped" : 0, - "rx_bytes" : 648, - "rx_errors" : 0, - "tx_packets" : 8, - "tx_dropped" : 0, - "rx_packets" : 8, - "tx_errors" : 0, - "tx_bytes" : 648 - }, - "memory_stats" : { - "stats" : { - "total_pgmajfault" : 0, - "cache" : 0, - "mapped_file" : 0, - "total_inactive_file" : 0, - "pgpgout" : 414, - "rss" : 6537216, - "total_mapped_file" : 0, - "writeback" : 0, - "unevictable" : 0, - "pgpgin" : 477, - "total_unevictable" : 0, - "pgmajfault" : 0, - "total_rss" : 6537216, - "total_rss_huge" : 6291456, - "total_writeback" : 0, - "total_inactive_anon" : 0, - "rss_huge" : 6291456, - "hierarchical_memory_limit": 189204833, - "total_pgfault" : 964, - "total_active_file" : 0, - "active_anon" : 6537216, - "total_active_anon" : 6537216, - "total_pgpgout" : 414, - "total_cache" : 0, - "inactive_anon" : 0, - "active_file" : 0, - "pgfault" : 964, - "inactive_file" : 0, - "total_pgpgin" : 477 - }, - "max_usage" : 6651904, - "usage" : 6537216, - "failcnt" : 0, - "limit" : 67108864 - }, - "blkio_stats": { - "io_service_bytes_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 428795731968 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 388177920 - } - ], - "io_serviced_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 25994442 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 1734 - } - ], - "io_queue_recursive": [], - "io_service_time_recursive": [], - "io_wait_time_recursive": [], - "io_merged_recursive": [], - "io_time_recursive": [], - "sectors_recursive": [] - }, - "cpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000 - }, - "precpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000 - } - }` - // 1 second later, cache is 100 - jsonStats2 := `{ - "read" : "2015-01-08T22:57:32.547920715Z", - "network" : { - "rx_dropped" : 0, - "rx_bytes" : 648, - "rx_errors" : 0, - "tx_packets" : 8, - "tx_dropped" : 0, - "rx_packets" : 8, - "tx_errors" : 0, - "tx_bytes" : 648 - }, - "memory_stats" : { - "stats" : { - "total_pgmajfault" : 0, - "cache" : 100, - "mapped_file" : 0, - "total_inactive_file" : 0, - "pgpgout" : 414, - "rss" : 6537216, - "total_mapped_file" : 0, - "writeback" : 0, - "unevictable" : 0, - "pgpgin" : 477, - "total_unevictable" : 0, - 
"pgmajfault" : 0, - "total_rss" : 6537216, - "total_rss_huge" : 6291456, - "total_writeback" : 0, - "total_inactive_anon" : 0, - "rss_huge" : 6291456, - "total_pgfault" : 964, - "total_active_file" : 0, - "active_anon" : 6537216, - "total_active_anon" : 6537216, - "total_pgpgout" : 414, - "total_cache" : 0, - "inactive_anon" : 0, - "active_file" : 0, - "pgfault" : 964, - "inactive_file" : 0, - "total_pgpgin" : 477 - }, - "max_usage" : 6651904, - "usage" : 6537216, - "failcnt" : 0, - "limit" : 67108864 - }, - "blkio_stats": { - "io_service_bytes_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 428795731968 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 388177920 - } - ], - "io_serviced_recursive": [ - { - "major": 8, - "minor": 0, - "op": "Read", - "value": 25994442 - }, - { - "major": 8, - "minor": 0, - "op": "Write", - "value": 1734 - } - ], - "io_queue_recursive": [], - "io_service_time_recursive": [], - "io_wait_time_recursive": [], - "io_merged_recursive": [], - "io_time_recursive": [], - "sectors_recursive": [] - }, - "cpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000 - }, - "precpu_stats" : { - "cpu_usage" : { - "percpu_usage" : [ - 16970827, - 1839451, - 7107380, - 10571290 - ], - "usage_in_usermode" : 10000000, - "total_usage" : 36488948, - "usage_in_kernelmode" : 20000000 - }, - "system_cpu_usage" : 20091722000000000 - } - }` - var expected1 Stats - var expected2 Stats - err := json.Unmarshal([]byte(jsonStats1), &expected1) - if err != nil { - t.Fatal(err) - } - err = json.Unmarshal([]byte(jsonStats2), &expected2) - if err != nil { - t.Fatal(err) - } - id := "4fa6e0f0" - - var req http.Request - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte(jsonStats1)) - w.Write([]byte(jsonStats2)) - req = *r - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - errC := make(chan error, 1) - statsC := make(chan *Stats) - done := make(chan bool) - go func() { - errC <- client.Stats(StatsOptions{id, statsC, true, done, 0}) - close(errC) - }() - var resultStats []*Stats - for { - stats, ok := <-statsC - if !ok { - break - } - resultStats = append(resultStats, stats) - } - err = <-errC - if err != nil { - t.Fatal(err) - } - if len(resultStats) != 2 { - t.Fatalf("Stats: Expected 2 results. Got %d.", len(resultStats)) - } - if !reflect.DeepEqual(resultStats[0], &expected1) { - t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected1, resultStats[0]) - } - if !reflect.DeepEqual(resultStats[1], &expected2) { - t.Errorf("Stats: Expected:\n%+v\nGot:\n%+v", expected2, resultStats[1]) - } - if req.Method != "GET" { - t.Errorf("Stats: wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/containers/" + id + "/stats")) - if req.URL.Path != u.Path { - t.Errorf("Stats: wrong HTTP path. Want %q. 
Got %q.", u.Path, req.URL.Path) - } -} - -func TestStatsContainerNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such container", status: http.StatusNotFound}) - statsC := make(chan *Stats) - done := make(chan bool) - err := client.Stats(StatsOptions{"abef348", statsC, true, done, 0}) - expected := &NoSuchContainer{ID: "abef348"} - if !reflect.DeepEqual(err, expected) { - t.Errorf("Stats: Wrong error returned. Want %#v. Got %#v.", expected, err) - } -} - -func TestRenameContainer(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := RenameContainerOptions{ID: "something_old", Name: "something_new"} - err := client.RenameContainer(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("RenameContainer: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/something_old/rename?name=something_new")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("RenameContainer: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - expectedValues := expectedURL.Query()["name"] - actualValues := req.URL.Query()["name"] - if len(actualValues) != 1 || expectedValues[0] != actualValues[0] { - t.Errorf("RenameContainer: Wrong params in request. Want %q. Got %q.", expectedValues, actualValues) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go deleted file mode 100644 index c54b0b0e8..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2014 Docker authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. - -package docker - -import ( - "encoding/json" - "fmt" - "io" - "strconv" - "strings" -) - -// Env represents a list of key-pair represented in the form KEY=VALUE. -type Env []string - -// Get returns the string value of the given key. -func (env *Env) Get(key string) (value string) { - return env.Map()[key] -} - -// Exists checks whether the given key is defined in the internal Env -// representation. -func (env *Env) Exists(key string) bool { - _, exists := env.Map()[key] - return exists -} - -// GetBool returns a boolean representation of the given key. The key is false -// whenever its value if 0, no, false, none or an empty string. Any other value -// will be interpreted as true. -func (env *Env) GetBool(key string) (value bool) { - s := strings.ToLower(strings.Trim(env.Get(key), " \t")) - if s == "" || s == "0" || s == "no" || s == "false" || s == "none" { - return false - } - return true -} - -// SetBool defines a boolean value to the given key. -func (env *Env) SetBool(key string, value bool) { - if value { - env.Set(key, "1") - } else { - env.Set(key, "0") - } -} - -// GetInt returns the value of the provided key, converted to int. -// -// It the value cannot be represented as an integer, it returns -1. -func (env *Env) GetInt(key string) int { - return int(env.GetInt64(key)) -} - -// SetInt defines an integer value to the given key. -func (env *Env) SetInt(key string, value int) { - env.Set(key, strconv.Itoa(value)) -} - -// GetInt64 returns the value of the provided key, converted to int64. -// -// It the value cannot be represented as an integer, it returns -1. 
-func (env *Env) GetInt64(key string) int64 { - s := strings.Trim(env.Get(key), " \t") - val, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return -1 - } - return val -} - -// SetInt64 defines an integer (64-bit wide) value to the given key. -func (env *Env) SetInt64(key string, value int64) { - env.Set(key, strconv.FormatInt(value, 10)) -} - -// GetJSON unmarshals the value of the provided key in the provided iface. -// -// iface is a value that can be provided to the json.Unmarshal function. -func (env *Env) GetJSON(key string, iface interface{}) error { - sval := env.Get(key) - if sval == "" { - return nil - } - return json.Unmarshal([]byte(sval), iface) -} - -// SetJSON marshals the given value to JSON format and stores it using the -// provided key. -func (env *Env) SetJSON(key string, value interface{}) error { - sval, err := json.Marshal(value) - if err != nil { - return err - } - env.Set(key, string(sval)) - return nil -} - -// GetList returns a list of strings matching the provided key. It handles the -// list as a JSON representation of a list of strings. -// -// If the given key matches to a single string, it will return a list -// containing only the value that matches the key. -func (env *Env) GetList(key string) []string { - sval := env.Get(key) - if sval == "" { - return nil - } - var l []string - if err := json.Unmarshal([]byte(sval), &l); err != nil { - l = append(l, sval) - } - return l -} - -// SetList stores the given list in the provided key, after serializing it to -// JSON format. -func (env *Env) SetList(key string, value []string) error { - return env.SetJSON(key, value) -} - -// Set defines the value of a key to the given string. -func (env *Env) Set(key, value string) { - *env = append(*env, key+"="+value) -} - -// Decode decodes `src` as a json dictionary, and adds each decoded key-value -// pair to the environment. -// -// If `src` cannot be decoded as a json dictionary, an error is returned. -func (env *Env) Decode(src io.Reader) error { - m := make(map[string]interface{}) - if err := json.NewDecoder(src).Decode(&m); err != nil { - return err - } - for k, v := range m { - env.SetAuto(k, v) - } - return nil -} - -// SetAuto will try to define the Set* method to call based on the given value. -func (env *Env) SetAuto(key string, value interface{}) { - if fval, ok := value.(float64); ok { - env.SetInt64(key, int64(fval)) - } else if sval, ok := value.(string); ok { - env.Set(key, sval) - } else if val, err := json.Marshal(value); err == nil { - env.Set(key, string(val)) - } else { - env.Set(key, fmt.Sprintf("%v", value)) - } -} - -// Map returns the map representation of the env. -func (env *Env) Map() map[string]string { - if len(*env) == 0 { - return nil - } - m := make(map[string]string) - for _, kv := range *env { - parts := strings.SplitN(kv, "=", 2) - m[parts[0]] = parts[1] - } - return m -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go deleted file mode 100644 index df5169d06..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/env_test.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the DOCKER-LICENSE file. 
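For reference, a minimal sketch of how the `Env` helpers removed above are typically used, assuming the package is still importable as `github.com/fsouza/go-dockerclient` (the import path used by the package's own examples later in this patch); the method names come directly from the deleted env.go:

```go
package main

import (
	"fmt"
	"log"

	"github.com/fsouza/go-dockerclient"
)

func main() {
	var env docker.Env

	// Plain string, integer and boolean values.
	env.Set("PATH", "/usr/bin:/bin")
	env.SetInt("WORKERS", 4)
	env.SetBool("DEBUG", true)

	// Structured values are stored as JSON under a single key.
	if err := env.SetJSON("limits", map[string]int{"memory": 512}); err != nil {
		log.Fatal(err)
	}

	fmt.Println(env.Get("PATH"))       // /usr/bin:/bin
	fmt.Println(env.GetInt("WORKERS")) // 4
	fmt.Println(env.GetBool("DEBUG"))  // true

	var limits map[string]int
	if err := env.GetJSON("limits", &limits); err != nil {
		log.Fatal(err)
	}
	fmt.Println(limits["memory"]) // 512

	// Map returns the whole environment as key/value pairs.
	for k, v := range env.Map() {
		fmt.Printf("%s=%q\n", k, v)
	}
}
```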
- -package docker - -import ( - "bytes" - "errors" - "reflect" - "sort" - "testing" -) - -func TestGet(t *testing.T) { - var tests = []struct { - input []string - query string - expected string - }{ - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PATH", "/usr/bin:/bin"}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", "/usr/local"}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", ""}, - {[]string{"WAT="}, "WAT", ""}, - } - for _, tt := range tests { - env := Env(tt.input) - got := env.Get(tt.query) - if got != tt.expected { - t.Errorf("Env.Get(%q): wrong result. Want %q. Got %q", tt.query, tt.expected, got) - } - } -} - -func TestExists(t *testing.T) { - var tests = []struct { - input []string - query string - expected bool - }{ - {[]string{"WAT=", "PYTHONPATH=/usr/local"}, "WAT", true}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATH", true}, - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, "PYTHONPATHI", false}, - } - for _, tt := range tests { - env := Env(tt.input) - got := env.Exists(tt.query) - if got != tt.expected { - t.Errorf("Env.Exists(%q): wrong result. Want %v. Got %v", tt.query, tt.expected, got) - } - } -} - -func TestGetBool(t *testing.T) { - var tests = []struct { - input string - expected bool - }{ - {"EMTPY_VAR", false}, {"ZERO_VAR", false}, {"NO_VAR", false}, - {"FALSE_VAR", false}, {"NONE_VAR", false}, {"TRUE_VAR", true}, - {"WAT", true}, {"PATH", true}, {"ONE_VAR", true}, {"NO_VAR_TAB", false}, - } - env := Env([]string{ - "EMPTY_VAR=", "ZERO_VAR=0", "NO_VAR=no", "FALSE_VAR=false", - "NONE_VAR=none", "TRUE_VAR=true", "WAT=wat", "PATH=/usr/bin:/bin", - "ONE_VAR=1", "NO_VAR_TAB=0 \t\t\t", - }) - for _, tt := range tests { - got := env.GetBool(tt.input) - if got != tt.expected { - t.Errorf("Env.GetBool(%q): wrong result. Want %v. Got %v.", tt.input, tt.expected, got) - } - } -} - -func TestSetBool(t *testing.T) { - var tests = []struct { - input bool - expected string - }{ - {true, "1"}, {false, "0"}, - } - for _, tt := range tests { - var env Env - env.SetBool("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetBool(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestGetInt(t *testing.T) { - var tests = []struct { - input string - expected int - }{ - {"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2}, - } - env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"}) - for _, tt := range tests { - got := env.GetInt(tt.input) - if got != tt.expected { - t.Errorf("Env.GetInt(%q): wrong result. Want %d. Got %d", tt.input, tt.expected, got) - } - } -} - -func TestSetInt(t *testing.T) { - var tests = []struct { - input int - expected string - }{ - {10, "10"}, {13, "13"}, {7, "7"}, {33, "33"}, - {0, "0"}, {-34, "-34"}, - } - for _, tt := range tests { - var env Env - env.SetInt("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestGetInt64(t *testing.T) { - var tests = []struct { - input string - expected int64 - }{ - {"NEGATIVE_INTEGER", -10}, {"NON_INTEGER", -1}, {"ONE", 1}, {"TWO", 2}, - } - env := Env([]string{"NEGATIVE_INTEGER=-10", "NON_INTEGER=wat", "ONE=1", "TWO=2"}) - for _, tt := range tests { - got := env.GetInt64(tt.input) - if got != tt.expected { - t.Errorf("Env.GetInt64(%q): wrong result. Want %d. 
Got %d", tt.input, tt.expected, got) - } - } -} - -func TestSetInt64(t *testing.T) { - var tests = []struct { - input int64 - expected string - }{ - {10, "10"}, {13, "13"}, {7, "7"}, {33, "33"}, - {0, "0"}, {-34, "-34"}, - } - for _, tt := range tests { - var env Env - env.SetInt64("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetBool(%d): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestGetJSON(t *testing.T) { - var p struct { - Name string `json:"name"` - Age int `json:"age"` - } - var env Env - env.Set("person", `{"name":"Gopher","age":5}`) - err := env.GetJSON("person", &p) - if err != nil { - t.Error(err) - } - if p.Name != "Gopher" { - t.Errorf("Env.GetJSON(%q): wrong name. Want %q. Got %q", "person", "Gopher", p.Name) - } - if p.Age != 5 { - t.Errorf("Env.GetJSON(%q): wrong age. Want %d. Got %d", "person", 5, p.Age) - } -} - -func TestGetJSONAbsent(t *testing.T) { - var l []string - var env Env - err := env.GetJSON("person", &l) - if err != nil { - t.Error(err) - } - if l != nil { - t.Errorf("Env.GetJSON(): get unexpected list %v", l) - } -} - -func TestGetJSONFailure(t *testing.T) { - var p []string - var env Env - env.Set("list-person", `{"name":"Gopher","age":5}`) - err := env.GetJSON("list-person", &p) - if err == nil { - t.Errorf("Env.GetJSON(%q): got unexpected error.", "list-person") - } -} - -func TestSetJSON(t *testing.T) { - var p1 = struct { - Name string `json:"name"` - Age int `json:"age"` - }{Name: "Gopher", Age: 5} - var env Env - err := env.SetJSON("person", p1) - if err != nil { - t.Error(err) - } - var p2 struct { - Name string `json:"name"` - Age int `json:"age"` - } - err = env.GetJSON("person", &p2) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(p1, p2) { - t.Errorf("Env.SetJSON(%q): wrong result. Want %v. Got %v", "person", p1, p2) - } -} - -func TestSetJSONFailure(t *testing.T) { - var env Env - err := env.SetJSON("person", unmarshable{}) - if err == nil { - t.Error("Env.SetJSON(): got unexpected error") - } - if env.Exists("person") { - t.Errorf("Env.SetJSON(): should not define the key %q, but did", "person") - } -} - -func TestGetList(t *testing.T) { - var tests = []struct { - input string - expected []string - }{ - {"WAT=wat", []string{"wat"}}, - {`WAT=["wat","wet","wit","wot","wut"]`, []string{"wat", "wet", "wit", "wot", "wut"}}, - {"WAT=", nil}, - } - for _, tt := range tests { - env := Env([]string{tt.input}) - got := env.GetList("WAT") - if !reflect.DeepEqual(got, tt.expected) { - t.Errorf("Env.GetList(%q): wrong result. Want %v. Got %v", "WAT", tt.expected, got) - } - } -} - -func TestSetList(t *testing.T) { - list := []string{"a", "b", "c"} - var env Env - if err := env.SetList("SOME", list); err != nil { - t.Error(err) - } - if got := env.GetList("SOME"); !reflect.DeepEqual(got, list) { - t.Errorf("Env.SetList(%v): wrong result. Got %v", list, got) - } -} - -func TestSet(t *testing.T) { - var env Env - env.Set("PATH", "/home/bin:/bin") - env.Set("SOMETHING", "/usr/bin") - env.Set("PATH", "/bin") - if expected, got := "/usr/bin", env.Get("SOMETHING"); got != expected { - t.Errorf("Env.Set(%q): wrong result. Want %q. Got %q", expected, expected, got) - } - if expected, got := "/bin", env.Get("PATH"); got != expected { - t.Errorf("Env.Set(%q): wrong result. Want %q. 
Got %q", expected, expected, got) - } -} - -func TestDecode(t *testing.T) { - var tests = []struct { - input string - expectedOut []string - expectedErr string - }{ - { - `{"PATH":"/usr/bin:/bin","containers":54,"wat":["123","345"]}`, - []string{"PATH=/usr/bin:/bin", "containers=54", `wat=["123","345"]`}, - "", - }, - {"}}", nil, "invalid character '}' looking for beginning of value"}, - {`{}`, nil, ""}, - } - for _, tt := range tests { - var env Env - err := env.Decode(bytes.NewBufferString(tt.input)) - if tt.expectedErr == "" { - if err != nil { - t.Error(err) - } - } else if tt.expectedErr != err.Error() { - t.Errorf("Env.Decode(): invalid error. Want %q. Got %q.", tt.expectedErr, err) - } - got := []string(env) - sort.Strings(got) - sort.Strings(tt.expectedOut) - if !reflect.DeepEqual(got, tt.expectedOut) { - t.Errorf("Env.Decode(): wrong result. Want %v. Got %v.", tt.expectedOut, got) - } - } -} - -func TestSetAuto(t *testing.T) { - buf := bytes.NewBufferString("oi") - var tests = []struct { - input interface{} - expected string - }{ - {10, "10"}, - {10.3, "10"}, - {"oi", "oi"}, - {buf, "{}"}, - {unmarshable{}, "{}"}, - } - for _, tt := range tests { - var env Env - env.SetAuto("SOME", tt.input) - if got := env.Get("SOME"); got != tt.expected { - t.Errorf("Env.SetAuto(%v): wrong result. Want %q. Got %q", tt.input, tt.expected, got) - } - } -} - -func TestMap(t *testing.T) { - var tests = []struct { - input []string - expected map[string]string - }{ - {[]string{"PATH=/usr/bin:/bin", "PYTHONPATH=/usr/local"}, map[string]string{"PATH": "/usr/bin:/bin", "PYTHONPATH": "/usr/local"}}, - {nil, nil}, - } - for _, tt := range tests { - env := Env(tt.input) - got := env.Map() - if !reflect.DeepEqual(got, tt.expected) { - t.Errorf("Env.Map(): wrong result. Want %v. Got %v", tt.expected, got) - } - } -} - -type unmarshable struct { -} - -func (unmarshable) MarshalJSON() ([]byte, error) { - return nil, errors.New("cannot marshal") -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go deleted file mode 100644 index eaffddb82..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "net" - "net/http" - "net/http/httputil" - "sync" - "sync/atomic" - "time" -) - -// APIEvents represents an event returned by the API. -type APIEvents struct { - Status string `json:"Status,omitempty" yaml:"Status,omitempty"` - ID string `json:"ID,omitempty" yaml:"ID,omitempty"` - From string `json:"From,omitempty" yaml:"From,omitempty"` - Time int64 `json:"Time,omitempty" yaml:"Time,omitempty"` -} - -type eventMonitoringState struct { - sync.RWMutex - sync.WaitGroup - enabled bool - lastSeen *int64 - C chan *APIEvents - errC chan error - listeners []chan<- *APIEvents -} - -const ( - maxMonitorConnRetries = 5 - retryInitialWaitTime = 10. -) - -var ( - // ErrNoListeners is the error returned when no listeners are available - // to receive an event. - ErrNoListeners = errors.New("no listeners present to receive event") - - // ErrListenerAlreadyExists is the error returned when the listerner already - // exists. 
- ErrListenerAlreadyExists = errors.New("listener already exists for docker events") - - // EOFEvent is sent when the event listener receives an EOF error. - EOFEvent = &APIEvents{ - Status: "EOF", - } -) - -// AddEventListener adds a new listener to container events in the Docker API. -// -// The parameter is a channel through which events will be sent. -func (c *Client) AddEventListener(listener chan<- *APIEvents) error { - var err error - if !c.eventMonitor.isEnabled() { - err = c.eventMonitor.enableEventMonitoring(c) - if err != nil { - return err - } - } - err = c.eventMonitor.addListener(listener) - if err != nil { - return err - } - return nil -} - -// RemoveEventListener removes a listener from the monitor. -func (c *Client) RemoveEventListener(listener chan *APIEvents) error { - err := c.eventMonitor.removeListener(listener) - if err != nil { - return err - } - if len(c.eventMonitor.listeners) == 0 { - c.eventMonitor.disableEventMonitoring() - } - return nil -} - -func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error { - eventState.Lock() - defer eventState.Unlock() - if listenerExists(listener, &eventState.listeners) { - return ErrListenerAlreadyExists - } - eventState.Add(1) - eventState.listeners = append(eventState.listeners, listener) - return nil -} - -func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error { - eventState.Lock() - defer eventState.Unlock() - if listenerExists(listener, &eventState.listeners) { - var newListeners []chan<- *APIEvents - for _, l := range eventState.listeners { - if l != listener { - newListeners = append(newListeners, l) - } - } - eventState.listeners = newListeners - eventState.Add(-1) - } - return nil -} - -func (eventState *eventMonitoringState) closeListeners() { - for _, l := range eventState.listeners { - close(l) - eventState.Add(-1) - } - eventState.listeners = nil -} - -func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool { - for _, b := range *list { - if b == a { - return true - } - } - return false -} - -func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error { - eventState.Lock() - defer eventState.Unlock() - if !eventState.enabled { - eventState.enabled = true - var lastSeenDefault = int64(0) - eventState.lastSeen = &lastSeenDefault - eventState.C = make(chan *APIEvents, 100) - eventState.errC = make(chan error, 1) - go eventState.monitorEvents(c) - } - return nil -} - -func (eventState *eventMonitoringState) disableEventMonitoring() error { - eventState.Lock() - defer eventState.Unlock() - - eventState.closeListeners() - - eventState.Wait() - - if eventState.enabled { - eventState.enabled = false - close(eventState.C) - close(eventState.errC) - } - return nil -} - -func (eventState *eventMonitoringState) monitorEvents(c *Client) { - var err error - for eventState.noListeners() { - time.Sleep(10 * time.Millisecond) - } - if err = eventState.connectWithRetry(c); err != nil { - // terminate if connect failed - eventState.disableEventMonitoring() - return - } - for eventState.isEnabled() { - timeout := time.After(100 * time.Millisecond) - select { - case ev, ok := <-eventState.C: - if !ok { - return - } - if ev == EOFEvent { - eventState.disableEventMonitoring() - return - } - eventState.updateLastSeen(ev) - go eventState.sendEvent(ev) - case err = <-eventState.errC: - if err == ErrNoListeners { - eventState.disableEventMonitoring() - return - } else if err != nil { - defer func() { go eventState.monitorEvents(c) }() - return - 
} - case <-timeout: - continue - } - } -} - -func (eventState *eventMonitoringState) connectWithRetry(c *Client) error { - var retries int - var err error - for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ { - waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries))) - time.Sleep(time.Duration(waitTime) * time.Millisecond) - err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC) - } - return err -} - -func (eventState *eventMonitoringState) noListeners() bool { - eventState.RLock() - defer eventState.RUnlock() - return len(eventState.listeners) == 0 -} - -func (eventState *eventMonitoringState) isEnabled() bool { - eventState.RLock() - defer eventState.RUnlock() - return eventState.enabled -} - -func (eventState *eventMonitoringState) sendEvent(event *APIEvents) { - eventState.RLock() - defer eventState.RUnlock() - eventState.Add(1) - defer eventState.Done() - if eventState.enabled { - if len(eventState.listeners) == 0 { - eventState.errC <- ErrNoListeners - return - } - - for _, listener := range eventState.listeners { - listener <- event - } - } -} - -func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) { - eventState.Lock() - defer eventState.Unlock() - if atomic.LoadInt64(eventState.lastSeen) < e.Time { - atomic.StoreInt64(eventState.lastSeen, e.Time) - } -} - -func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error { - uri := "/events" - if startTime != 0 { - uri += fmt.Sprintf("?since=%d", startTime) - } - protocol := c.endpointURL.Scheme - address := c.endpointURL.Path - if protocol != "unix" { - protocol = "tcp" - address = c.endpointURL.Host - } - var dial net.Conn - var err error - if c.TLSConfig == nil { - dial, err = c.Dialer.Dial(protocol, address) - } else { - dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig) - } - if err != nil { - return err - } - conn := httputil.NewClientConn(dial, nil) - req, err := http.NewRequest("GET", uri, nil) - if err != nil { - return err - } - res, err := conn.Do(req) - if err != nil { - return err - } - go func(res *http.Response, conn *httputil.ClientConn) { - defer conn.Close() - defer res.Body.Close() - decoder := json.NewDecoder(res.Body) - for { - var event APIEvents - if err = decoder.Decode(&event); err != nil { - if err == io.EOF || err == io.ErrUnexpectedEOF { - if c.eventMonitor.isEnabled() { - // Signal that we're exiting. - eventChan <- EOFEvent - } - break - } - errChan <- err - } - if event.Time == 0 { - continue - } - if !c.eventMonitor.isEnabled() { - return - } - eventChan <- &event - } - }(res, conn) - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go deleted file mode 100644 index a308538cc..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/event_test.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package docker - -import ( - "bufio" - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" -) - -func TestEventListeners(t *testing.T) { - testEventListeners("TestEventListeners", t, httptest.NewServer, NewClient) -} - -func TestTLSEventListeners(t *testing.T) { - testEventListeners("TestTLSEventListeners", t, func(handler http.Handler) *httptest.Server { - server := httptest.NewUnstartedServer(handler) - - cert, err := tls.LoadX509KeyPair("testing/data/server.pem", "testing/data/serverkey.pem") - if err != nil { - t.Fatalf("Error loading server key pair: %s", err) - } - - caCert, err := ioutil.ReadFile("testing/data/ca.pem") - if err != nil { - t.Fatalf("Error loading ca certificate: %s", err) - } - caPool := x509.NewCertPool() - if !caPool.AppendCertsFromPEM(caCert) { - t.Fatalf("Could not add ca certificate") - } - - server.TLS = &tls.Config{ - Certificates: []tls.Certificate{cert}, - RootCAs: caPool, - } - server.StartTLS() - return server - }, func(url string) (*Client, error) { - return NewTLSClient(url, "testing/data/cert.pem", "testing/data/key.pem", "testing/data/ca.pem") - }) -} - -func testEventListeners(testName string, t *testing.T, buildServer func(http.Handler) *httptest.Server, buildClient func(string) (*Client, error)) { - response := `{"status":"create","id":"dfdf82bd3881","from":"base:latest","time":1374067924} -{"status":"start","id":"dfdf82bd3881","from":"base:latest","time":1374067924} -{"status":"stop","id":"dfdf82bd3881","from":"base:latest","time":1374067966} -{"status":"destroy","id":"dfdf82bd3881","from":"base:latest","time":1374067970} -` - - server := buildServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - rsc := bufio.NewScanner(strings.NewReader(response)) - for rsc.Scan() { - w.Write([]byte(rsc.Text())) - w.(http.Flusher).Flush() - time.Sleep(10 * time.Millisecond) - } - })) - defer server.Close() - - client, err := buildClient(server.URL) - if err != nil { - t.Errorf("Failed to create client: %s", err) - } - client.SkipServerVersionCheck = true - - listener := make(chan *APIEvents, 10) - defer func() { - time.Sleep(10 * time.Millisecond) - if err := client.RemoveEventListener(listener); err != nil { - t.Error(err) - } - }() - - err = client.AddEventListener(listener) - if err != nil { - t.Errorf("Failed to add event listener: %s", err) - } - - timeout := time.After(1 * time.Second) - var count int - - for { - select { - case msg := <-listener: - t.Logf("Received: %v", *msg) - count++ - err = checkEvent(count, msg) - if err != nil { - t.Fatalf("Check event failed: %s", err) - } - if count == 4 { - return - } - case <-timeout: - t.Fatalf("%s timed out waiting on events", testName) - } - } -} - -func checkEvent(index int, event *APIEvents) error { - if event.ID != "dfdf82bd3881" { - return fmt.Errorf("event ID did not match. Expected dfdf82bd3881 got %s", event.ID) - } - if event.From != "base:latest" { - return fmt.Errorf("event from did not match. Expected base:latest got %s", event.From) - } - var status string - switch index { - case 1: - status = "create" - case 2: - status = "start" - case 3: - status = "stop" - case 4: - status = "destroy" - } - if event.Status != status { - return fmt.Errorf("event status did not match. 
Expected %s got %s", status, event.Status) - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go deleted file mode 100644 index 8c2c719e6..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/example_test.go +++ /dev/null @@ -1,168 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker_test - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "log" - "time" - - "github.com/fsouza/go-dockerclient" -) - -func ExampleClient_AttachToContainer() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - client.SkipServerVersionCheck = true - // Reading logs from container a84849 and sending them to buf. - var buf bytes.Buffer - err = client.AttachToContainer(docker.AttachToContainerOptions{ - Container: "a84849", - OutputStream: &buf, - Logs: true, - Stdout: true, - Stderr: true, - }) - if err != nil { - log.Fatal(err) - } - log.Println(buf.String()) - buf.Reset() - err = client.AttachToContainer(docker.AttachToContainerOptions{ - Container: "a84849", - OutputStream: &buf, - Stdout: true, - Stream: true, - }) - if err != nil { - log.Fatal(err) - } - log.Println(buf.String()) -} - -func ExampleClient_CopyFromContainer() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - cid := "a84849" - var buf bytes.Buffer - filename := "/tmp/output.txt" - err = client.CopyFromContainer(docker.CopyFromContainerOptions{ - Container: cid, - Resource: filename, - OutputStream: &buf, - }) - if err != nil { - log.Fatalf("Error while copying from %s: %s\n", cid, err) - } - content := new(bytes.Buffer) - r := bytes.NewReader(buf.Bytes()) - tr := tar.NewReader(r) - tr.Next() - if err != nil && err != io.EOF { - log.Fatal(err) - } - if _, err := io.Copy(content, tr); err != nil { - log.Fatal(err) - } - log.Println(buf.String()) -} - -func ExampleClient_BuildImage() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - - t := time.Now() - inputbuf, outputbuf := bytes.NewBuffer(nil), bytes.NewBuffer(nil) - tr := tar.NewWriter(inputbuf) - tr.WriteHeader(&tar.Header{Name: "Dockerfile", Size: 10, ModTime: t, AccessTime: t, ChangeTime: t}) - tr.Write([]byte("FROM base\n")) - tr.Close() - opts := docker.BuildImageOptions{ - Name: "test", - InputStream: inputbuf, - OutputStream: outputbuf, - } - if err := client.BuildImage(opts); err != nil { - log.Fatal(err) - } -} - -func ExampleClient_ListenEvents() { - client, err := docker.NewClient("http://localhost:4243") - if err != nil { - log.Fatal(err) - } - - listener := make(chan *docker.APIEvents) - err = client.AddEventListener(listener) - if err != nil { - log.Fatal(err) - } - - defer func() { - - err = client.RemoveEventListener(listener) - if err != nil { - log.Fatal(err) - } - - }() - - timeout := time.After(1 * time.Second) - - for { - select { - case msg := <-listener: - log.Println(msg) - case <-timeout: - break - } - } - -} - -func ExampleEnv_Map() { - e := docker.Env([]string{"A=1", "B=2", "C=3"}) - envs := e.Map() - for k, v := range envs { - fmt.Printf("%s=%q\n", k, v) - } -} - -func ExampleEnv_SetJSON() { - type Person struct { - Name string - Age int - } - p := Person{Name: "Gopher", Age: 4} - var e docker.Env - err := e.SetJSON("person", p) - 
if err != nil { - log.Fatal(err) - } -} - -func ExampleEnv_GetJSON() { - type Person struct { - Name string - Age int - } - p := Person{Name: "Gopher", Age: 4} - var e docker.Env - e.Set("person", `{"name":"Gopher","age":4}`) - err := e.GetJSON("person", &p) - if err != nil { - log.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go deleted file mode 100644 index 84047f04e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" -) - -// Exec is the type representing a `docker exec` instance and containing the -// instance ID -type Exec struct { - ID string `json:"Id,omitempty" yaml:"Id,omitempty"` -} - -// CreateExecOptions specify parameters to the CreateExecContainer function. -// -// See https://goo.gl/1KSIb7 for more details -type CreateExecOptions struct { - AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` - Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"` - Container string `json:"Container,omitempty" yaml:"Container,omitempty"` - User string `json:"User,omitempty" yaml:"User,omitempty"` -} - -// CreateExec sets up an exec instance in a running container `id`, returning the exec -// instance, or an error in case of failure. -// -// See https://goo.gl/1KSIb7 for more details -func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) { - path := fmt.Sprintf("/containers/%s/exec", opts.Container) - body, status, err := c.do("POST", path, doOptions{data: opts}) - if status == http.StatusNotFound { - return nil, &NoSuchContainer{ID: opts.Container} - } - if err != nil { - return nil, err - } - var exec Exec - err = json.Unmarshal(body, &exec) - if err != nil { - return nil, err - } - - return &exec, nil -} - -// StartExecOptions specify parameters to the StartExecContainer function. -// -// See https://goo.gl/iQCnto for more details -type StartExecOptions struct { - Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"` - - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` - - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - ErrorStream io.Writer `qs:"-"` - - // Use raw terminal? Usually true when the container contains a TTY. - RawTerminal bool `qs:"-"` - - // If set, after a successful connect, a sentinel will be sent and then the - // client will block on receive before continuing. - // - // It must be an unbuffered channel. Using a buffered channel can lead - // to unexpected behavior. - Success chan struct{} `json:"-"` -} - -// StartExec starts a previously set up exec instance id. If opts.Detach is -// true, it returns after starting the exec command. Otherwise, it sets up an -// interactive session with the exec command. 
-// -// See https://goo.gl/iQCnto for more details -func (c *Client) StartExec(id string, opts StartExecOptions) error { - if id == "" { - return &NoSuchExec{ID: id} - } - - path := fmt.Sprintf("/exec/%s/start", id) - - if opts.Detach { - _, status, err := c.do("POST", path, doOptions{data: opts}) - if status == http.StatusNotFound { - return &NoSuchExec{ID: id} - } - if err != nil { - return err - } - return nil - } - - return c.hijack("POST", path, hijackOptions{ - success: opts.Success, - setRawTerminal: opts.RawTerminal, - in: opts.InputStream, - stdout: opts.OutputStream, - stderr: opts.ErrorStream, - data: opts, - }) -} - -// ResizeExecTTY resizes the tty session used by the exec command id. This API -// is valid only if Tty was specified as part of creating and starting the exec -// command. -// -// See https://goo.gl/e1JpsA for more details -func (c *Client) ResizeExecTTY(id string, height, width int) error { - params := make(url.Values) - params.Set("h", strconv.Itoa(height)) - params.Set("w", strconv.Itoa(width)) - - path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode()) - _, _, err := c.do("POST", path, doOptions{}) - return err -} - -// ExecProcessConfig is a type describing the command associated to a Exec -// instance. It's used in the ExecInspect type. -type ExecProcessConfig struct { - Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"` - User string `json:"user,omitempty" yaml:"user,omitempty"` - Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"` - EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"` - Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"` -} - -// ExecInspect is a type with details about a exec instance, including the -// exit code if the command has finished running. It's returned by a api -// call to /exec/(id)/json -// -// See https://goo.gl/gPtX9R for more details -type ExecInspect struct { - ID string `json:"ID,omitempty" yaml:"ID,omitempty"` - Running bool `json:"Running,omitempty" yaml:"Running,omitempty"` - ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` - OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"` - OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"` - ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"` - Container Container `json:"Container,omitempty" yaml:"Container,omitempty"` -} - -// InspectExec returns low-level information about the exec command id. -// -// See https://goo.gl/gPtX9R for more details -func (c *Client) InspectExec(id string) (*ExecInspect, error) { - path := fmt.Sprintf("/exec/%s/json", id) - body, status, err := c.do("GET", path, doOptions{}) - if status == http.StatusNotFound { - return nil, &NoSuchExec{ID: id} - } - if err != nil { - return nil, err - } - var exec ExecInspect - err = json.Unmarshal(body, &exec) - if err != nil { - return nil, err - } - return &exec, nil -} - -// NoSuchExec is the error returned when a given exec instance does not exist. 
-type NoSuchExec struct { - ID string -} - -func (err *NoSuchExec) Error() string { - return "No such exec instance: " + err.ID -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go deleted file mode 100644 index 2dc8d2100..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go +++ /dev/null @@ -1,262 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/json" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "strings" - "testing" -) - -func TestExecCreate(t *testing.T) { - jsonContainer := `{"Id": "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2"}` - var expected struct{ ID string } - err := json.Unmarshal([]byte(jsonContainer), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonContainer, status: http.StatusOK} - client := newTestClient(fakeRT) - config := CreateExecOptions{ - Container: "test", - AttachStdin: true, - AttachStdout: true, - AttachStderr: false, - Tty: false, - Cmd: []string{"touch", "/tmp/file"}, - User: "a-user", - } - execObj, err := client.CreateExec(config) - if err != nil { - t.Fatal(err) - } - expectedID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - if execObj.ID != expectedID { - t.Errorf("ExecCreate: wrong ID. Want %q. Got %q.", expectedID, execObj.ID) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ExecCreate: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/containers/test/exec")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } - var gotBody struct{ ID string } - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } -} - -func TestExecStartDetached(t *testing.T) { - execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - config := StartExecOptions{ - Detach: true, - } - err := client.StartExec(execID, config) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/start")) - if gotPath := req.URL.Path; gotPath != expectedURL.Path { - t.Errorf("ExecCreate: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) - } - t.Log(req.Body) - var gotBody struct{ Detach bool } - err = json.NewDecoder(req.Body).Decode(&gotBody) - if err != nil { - t.Fatal(err) - } - if !gotBody.Detach { - t.Fatal("Expected Detach in StartExecOptions to be true") - } -} - -func TestExecStartAndAttach(t *testing.T) { - var reader = strings.NewReader("send value") - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Write([]byte{1, 0, 0, 0, 0, 0, 0, 5}) - w.Write([]byte("hello")) - })) - defer server.Close() - client, _ := NewClient(server.URL) - client.SkipServerVersionCheck = true - var stdout, stderr bytes.Buffer - success := make(chan struct{}) - execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - opts := StartExecOptions{ - OutputStream: &stdout, - ErrorStream: &stderr, - InputStream: reader, - RawTerminal: true, - Success: success, - } - go func() { - if err := client.StartExec(execID, opts); err != nil { - t.Error(err) - } - }() - <-success -} - -func TestExecResize(t *testing.T) { - execID := "4fa6e0f0c6786287e131c3852c58a2e01cc697a68231826813597e4994f1d6e2" - fakeRT := &FakeRoundTripper{status: http.StatusOK} - client := newTestClient(fakeRT) - err := client.ResizeExecTTY(execID, 10, 20) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("ExecStart: wrong HTTP method. Want %q. Got %q.", "POST", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/exec/" + execID + "/resize?h=10&w=20")) - if gotPath := req.URL.RequestURI(); gotPath != expectedURL.RequestURI() { - t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath) - } -} - -func TestExecInspect(t *testing.T) { - jsonExec := `{ - "ID": "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e", - "Running": true, - "ExitCode": 0, - "ProcessConfig": { - "privileged": false, - "user": "", - "tty": true, - "entrypoint": "bash", - "arguments": [] - }, - "OpenStdin": true, - "OpenStderr": true, - "OpenStdout": true, - "Container": { - "State": { - "Running": true, - "Paused": false, - "Restarting": false, - "OOMKilled": false, - "Pid": 29392, - "ExitCode": 0, - "Error": "", - "StartedAt": "2015-01-21T17:08:59.634662178Z", - "FinishedAt": "0001-01-01T00:00:00Z" - }, - "ID": "922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521", - "Created": "2015-01-21T17:08:59.46407212Z", - "Path": "/bin/bash", - "Args": [ - "-lc", - "tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null" - ], - "Config": { - "Hostname": "922cd0568714", - "Domainname": "", - "User": "ubuntu", - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 100, - "Cpuset": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "PortSpecs": null, - "ExposedPorts": { - "8888/tcp": {} - }, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": [ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - ], - "Cmd": [ - "/bin/bash", - "-lc", - "tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null" - ], - "Image": "tsuru/app-gtest", - "Volumes": null, - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - "OnBuild": null - }, - "Image": "a88060b8b54fde0f7168c86742d0ce83b80f3f10925d85c98fdad9ed00bef544", - "NetworkSettings": { - "IPAddress": "172.17.0.8", - 
"IPPrefixLen": 16, - "MacAddress": "02:42:ac:11:00:08", - "LinkLocalIPv6Address": "fe80::42:acff:fe11:8", - "LinkLocalIPv6PrefixLen": 64, - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "Gateway": "172.17.42.1", - "IPv6Gateway": "", - "Bridge": "docker0", - "PortMapping": null, - "Ports": { - "8888/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49156" - } - ] - } - }, - "ResolvConfPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/resolv.conf", - "HostnamePath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hostname", - "HostsPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hosts", - "Name": "/c7e43b72288ee9d0270a", - "Driver": "aufs", - "ExecDriver": "native-0.2", - "MountLabel": "", - "ProcessLabel": "", - "AppArmorProfile": "", - "RestartCount": 0, - "UpdateDns": false, - "Volumes": {}, - "VolumesRW": {} - } - }` - var expected ExecInspect - err := json.Unmarshal([]byte(jsonExec), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonExec, status: http.StatusOK} - client := newTestClient(fakeRT) - expectedID := "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e" - execObj, err := client.InspectExec(expectedID) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*execObj, expected) { - t.Errorf("ExecInspect: Expected %#v. Got %#v.", expected, *execObj) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ExecInspect: wrong HTTP method. Want %q. Got %q.", "GET", req.Method) - } - expectedURL, _ := url.Parse(client.getURL("/exec/" + expectedID + "/json")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("ExecInspect: Wrong path in request. Want %q. 
Got %q.", expectedURL.Path, gotPath) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index a38715497..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,26 +0,0 @@ -# (Unreleased) - -logrus/core: improve performance of text formatter by 40% -logrus/core: expose `LevelHooks` type - -# 0.8.2 - -logrus: fix more Fatal family functions - -# 0.8.1 - -logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -logrus: defaults to stderr instead of stdout -hooks/sentry: add special field for `*http.Request` -formatter/text: ignore Windows for colors - -# 0.7.3 - -formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -formatter/text: Add configuration option for time format (#158) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index 4be378476..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,355 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc] - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. 
The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. 
- log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "github.com/Sirupsen/logrus/hooks/airbrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - log.AddHook(airbrake.NewHook("https://example.com", "xyz", "development")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` - - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go) | Send errors to an exception tracking service compatible with the Airbrake API. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go) | Send errors to the Papertrail hosted logging service via UDP. 
| -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [BugSnag](https://github.com/Sirupsen/logrus/blob/master/hooks/bugsnag/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/Sirupsen/logrus/blob/master/hooks/sentry/sentry.go) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-hooks/tree/master/graylog) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&logrus.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. 
- -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus_logstash.LogstashFormatter`. Logs fields as Logstash Events (http://logstash.net). - - ```go - logrus.SetFormatter(&logrus_logstash.LogstashFormatter{Type: “application_name"}) - ``` - -Third party logging formatters: - -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - - -[godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 699ea035c..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,254 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. 
- Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := Fields{} - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -func (entry *Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) 
-} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go deleted file mode 100644 index cd90aa7dc..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" -) - -func TestEntryPanicln(t *testing.T) { - errBoom := fmt.Errorf("boom time") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicln("kaboom") -} - -func TestEntryPanicf(t *testing.T) { - errBoom := fmt.Errorf("boom again") - - defer func() { - p := recover() - assert.NotNil(t, p) - - switch pVal := p.(type) { - case *Entry: - assert.Equal(t, "kaboom true", pVal.Message) - assert.Equal(t, errBoom, pVal.Data["err"]) - default: - t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) - } - }() - - logger := New() - logger.Out = &bytes.Buffer{} - entry := NewEntry(logger) - entry.WithField("err", errBoom).Panicf("kaboom %v", true) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index a67e1b802..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,188 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. 
-func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) 
-} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index 104d689f1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,48 +0,0 @@ -package logrus - -import "time" - -const DefaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields) { - _, ok := data["time"] - if ok { - data["fields.time"] = data["time"] - } - - _, ok = data["msg"] - if ok { - data["fields.msg"] = data["msg"] - } - - _, ok = data["level"] - if ok { - data["fields.level"] = data["level"] - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go deleted file mode 100644 index c6d290c77..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/formatter_bench_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package logrus - -import ( - "fmt" - "testing" - "time" -) - -// smallFields is a small size data set for benchmarking -var smallFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", -} - -// largeFields is a large size data set for benchmarking -var largeFields = Fields{ - "foo": "bar", - "baz": "qux", - "one": "two", - "three": "four", - "five": "six", - "seven": "eight", - "nine": "ten", - "eleven": "twelve", - "thirteen": "fourteen", - "fifteen": "sixteen", - "seventeen": "eighteen", - "nineteen": "twenty", - "a": "b", - "c": "d", - "e": "f", - "g": "h", - "i": "j", - "k": "l", - "m": "n", - "o": "p", - "q": "r", - "s": "t", - "u": "v", - "w": "x", - "y": "z", - "this": "will", - "make": "thirty", - "entries": "yeah", -} - -var errorFields = Fields{ - "foo": fmt.Errorf("bar"), - "baz": fmt.Errorf("qux"), -} - -func BenchmarkErrorTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, errorFields) -} - -func BenchmarkSmallTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) -} - -func BenchmarkLargeTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) -} - -func 
BenchmarkSmallColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) -} - -func BenchmarkLargeColoredTextFormatter(b *testing.B) { - doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) -} - -func BenchmarkSmallJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, smallFields) -} - -func BenchmarkLargeJSONFormatter(b *testing.B) { - doBenchmark(b, &JSONFormatter{}, largeFields) -} - -func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { - entry := &Entry{ - Time: time.Time{}, - Level: InfoLevel, - Message: "message", - Data: fields, - } - var d []byte - var err error - for i := 0; i < b.N; i++ { - d, err = formatter.Format(entry) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(d))) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go deleted file mode 100644 index 938b97495..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hook_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package logrus - -import ( - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" -) - -type TestHook struct { - Fired bool -} - -func (hook *TestHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *TestHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookFires(t *testing.T) { - hook := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - assert.Equal(t, hook.Fired, false) - - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} - -type ModifyHook struct { -} - -func (hook *ModifyHook) Fire(entry *Entry) error { - entry.Data["wow"] = "whale" - return nil -} - -func (hook *ModifyHook) Levels() []Level { - return []Level{ - DebugLevel, - InfoLevel, - WarnLevel, - ErrorLevel, - FatalLevel, - PanicLevel, - } -} - -func TestHookCanModifyEntry(t *testing.T) { - hook := new(ModifyHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - }) -} - -func TestCanFireMultipleHooks(t *testing.T) { - hook1 := new(ModifyHook) - hook2 := new(TestHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook1) - log.Hooks.Add(hook2) - - log.WithField("wow", "elephant").Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["wow"], "whale") - assert.Equal(t, hook2.Fired, true) - }) -} - -type ErrorHook struct { - Fired bool -} - -func (hook *ErrorHook) Fire(entry *Entry) error { - hook.Fired = true - return nil -} - -func (hook *ErrorHook) Levels() []Level { - return []Level{ - ErrorLevel, - } -} - -func TestErrorHookShouldntFireOnInfo(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, false) - }) -} - -func TestErrorHookShouldFireOnError(t *testing.T) { - hook := new(ErrorHook) - - LogAndAssertJSON(t, func(log *Logger) { - log.Hooks.Add(hook) - log.Error("test") - }, func(fields Fields) { - assert.Equal(t, hook.Fired, true) - }) -} diff --git 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc3..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 2ad6dc5cf..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,41 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. 
- TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go deleted file mode 100644 index 1d7087325..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/json_formatter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package logrus - -import ( - "encoding/json" - "errors" - - "testing" -) - -func TestErrorNotLost(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["error"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["omg"] != "wild walrus" { - t.Fatal("Error field not set") - } -} - -func TestFieldClashWithTime(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("time", "right now!")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.time"] != "right now!" 
{ - t.Fatal("fields.time not set to original time field") - } - - if entry["time"] != "0001-01-01T00:00:00Z" { - t.Fatal("time field not set to current time, was: ", entry["time"]) - } -} - -func TestFieldClashWithMsg(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("msg", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.msg"] != "something" { - t.Fatal("fields.msg not set to original msg field") - } -} - -func TestFieldClashWithLevel(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - entry := make(map[string]interface{}) - err = json.Unmarshal(b, &entry) - if err != nil { - t.Fatal("Unable to unmarshal formatted entry: ", err) - } - - if entry["fields.level"] != "something" { - t.Fatal("fields.level not set to original level field") - } -} - -func TestJSONEntryEndsWithNewline(t *testing.T) { - formatter := &JSONFormatter{} - - b, err := formatter.Format(WithField("level", "something")) - if err != nil { - t.Fatal("Unable to format entry: ", err) - } - - if b[len(b)-1] != '\n' { - t.Fatal("Expected JSON log entry to end with a newline") - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index e4974bfbe..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,206 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stdout`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. 
-func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that you it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// Ff you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) - } -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) - } -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } -} - -func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...) - } -} - -func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) - } -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } -} - -func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) - } -} - -func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } -} - -func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) 
- } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index 43ee12e90..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,94 +0,0 @@ -package logrus - -import ( - "fmt" - "log" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch lvl { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. 
-type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go deleted file mode 100644 index e8719b090..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus_test.go +++ /dev/null @@ -1,301 +0,0 @@ -package logrus - -import ( - "bytes" - "encoding/json" - "strconv" - "strings" - "sync" - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/stretchr/testify/assert" -) - -func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - log(logger) - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assertions(fields) -} - -func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { - var buffer bytes.Buffer - - logger := New() - logger.Out = &buffer - logger.Formatter = &TextFormatter{ - DisableColors: true, - } - - log(logger) - - fields := make(map[string]string) - for _, kv := range strings.Split(buffer.String(), " ") { - if !strings.Contains(kv, "=") { - continue - } - kvArr := strings.Split(kv, "=") - key := strings.TrimSpace(kvArr[0]) - val := kvArr[1] - if kvArr[1][0] == '"' { - var err error - val, err = strconv.Unquote(val) - assert.NoError(t, err) - } - fields[key] = val - } - assertions(fields) -} - -func TestPrint(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Print("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestInfo(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "info") - }) -} - -func TestWarn(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Warn("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["level"], "warning") - }) -} - -func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test test") - }) -} - -func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln("test", 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test 10") - }) -} - -func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Infoln(10, 10) - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "10 10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", 10) - }, func(fields Fields) { - 
assert.Equal(t, fields["msg"], "test10") - }) -} - -func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.Info("test", "test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "testtest") - }) -} - -func TestWithFieldsShouldAllowAssignments(t *testing.T) { - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - localLog := logger.WithFields(Fields{ - "key1": "value1", - }) - - localLog.WithField("key2", "value2").Info("test") - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - assert.Equal(t, "value2", fields["key2"]) - assert.Equal(t, "value1", fields["key1"]) - - buffer = bytes.Buffer{} - fields = Fields{} - localLog.Info("test") - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.Nil(t, err) - - _, ok := fields["key2"] - assert.Equal(t, false, ok) - assert.Equal(t, "value1", fields["key1"]) -} - -func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - }) -} - -func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("msg", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["msg"], "test") - assert.Equal(t, fields["fields.msg"], "hello") - }) -} - -func TestUserSuppliedTimeFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("time", "hello").Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["fields.time"], "hello") - }) -} - -func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { - LogAndAssertJSON(t, func(log *Logger) { - log.WithField("level", 1).Info("test") - }, func(fields Fields) { - assert.Equal(t, fields["level"], "info") - assert.Equal(t, fields["fields.level"], 1.0) // JSON has floats only - }) -} - -func TestDefaultFieldsAreNotPrefixed(t *testing.T) { - LogAndAssertText(t, func(log *Logger) { - ll := log.WithField("herp", "derp") - ll.Info("hello") - ll.Info("bye") - }, func(fields map[string]string) { - for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { - if _, ok := fields[fieldName]; ok { - t.Fatalf("should not have prefixed %q: %v", fieldName, fields) - } - } - }) -} - -func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { - - var buffer bytes.Buffer - var fields Fields - - logger := New() - logger.Out = &buffer - logger.Formatter = new(JSONFormatter) - - llog := logger.WithField("context", "eating raw fish") - - llog.Info("looks delicious") - - err := json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded first message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "looks delicious") - assert.Equal(t, fields["context"], "eating raw fish") - - buffer.Reset() - - llog.Warn("omg it is!") - - err = json.Unmarshal(buffer.Bytes(), &fields) - assert.NoError(t, err, "should have decoded second message") - assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") - assert.Equal(t, fields["msg"], "omg it is!") - assert.Equal(t, fields["context"], "eating raw fish") - assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") - -} - -func TestConvertLevelToString(t *testing.T) { - assert.Equal(t, "debug", DebugLevel.String()) - 
assert.Equal(t, "info", InfoLevel.String()) - assert.Equal(t, "warning", WarnLevel.String()) - assert.Equal(t, "error", ErrorLevel.String()) - assert.Equal(t, "fatal", FatalLevel.String()) - assert.Equal(t, "panic", PanicLevel.String()) -} - -func TestParseLevel(t *testing.T) { - l, err := ParseLevel("panic") - assert.Nil(t, err) - assert.Equal(t, PanicLevel, l) - - l, err = ParseLevel("fatal") - assert.Nil(t, err) - assert.Equal(t, FatalLevel, l) - - l, err = ParseLevel("error") - assert.Nil(t, err) - assert.Equal(t, ErrorLevel, l) - - l, err = ParseLevel("warn") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("warning") - assert.Nil(t, err) - assert.Equal(t, WarnLevel, l) - - l, err = ParseLevel("info") - assert.Nil(t, err) - assert.Equal(t, InfoLevel, l) - - l, err = ParseLevel("debug") - assert.Nil(t, err) - assert.Equal(t, DebugLevel, l) - - l, err = ParseLevel("invalid") - assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) -} - -func TestGetSetLevelRace(t *testing.T) { - wg := sync.WaitGroup{} - for i := 0; i < 100; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - if i%2 == 0 { - SetLevel(InfoLevel) - } else { - GetLevel() - } - }(i) - - } - wg.Wait() -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 71f8d67a5..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go deleted file mode 100644 index 0428ee5d5..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_freebsd.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin. -*/ -package logrus - -import ( - "syscall" -) - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]uint8 - Ispeed uint32 - Ospeed uint32 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40db..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index b8bebc13e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stdout - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go deleted file mode 100644 index af609a53d..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_openbsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 2e09f6f7e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if the given file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stdout - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 2e6fe1bdd..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,158 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. - DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors - - if f.TimestampFormat == "" { - f.TimestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(f.TimestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - f.appendKeyValue(b, "msg", entry.Message) - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(f.TimestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v) - } -} - -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - 
(ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return false - } - } - return true -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - - b.WriteString(key) - b.WriteByte('=') - - switch value := value.(type) { - case string: - if needsQuoting(value) { - b.WriteString(value) - } else { - fmt.Fprintf(b, "%q", value) - } - case error: - errmsg := value.Error() - if needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - fmt.Fprintf(b, "%q", value) - } - default: - fmt.Fprint(b, value) - } - - b.WriteByte(' ') -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go deleted file mode 100644 index e25a44f67..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/text_formatter_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package logrus - -import ( - "bytes" - "errors" - "testing" - "time" -) - -func TestQuoting(t *testing.T) { - tf := &TextFormatter{DisableColors: true} - - checkQuoting := func(q bool, value interface{}) { - b, _ := tf.Format(WithField("test", value)) - idx := bytes.Index(b, ([]byte)("test=")) - cont := bytes.Contains(b[idx+5:], []byte{'"'}) - if cont != q { - if q { - t.Errorf("quoting expected for: %#v", value) - } else { - t.Errorf("quoting not expected for: %#v", value) - } - } - } - - checkQuoting(false, "abcd") - checkQuoting(false, "v1.0") - checkQuoting(false, "1234567890") - checkQuoting(true, "/foobar") - checkQuoting(true, "x y") - checkQuoting(true, "x,y") - checkQuoting(false, errors.New("invalid")) - checkQuoting(true, errors.New("invalid argument")) -} - -func TestTimestampFormat(t *testing.T) { - checkTimeStr := func(format string) { - customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} - customStr, _ := customFormatter.Format(WithField("test", "test")) - timeStart := bytes.Index(customStr, ([]byte)("time=")) - timeEnd := bytes.Index(customStr, ([]byte)("level=")) - timeStr := customStr[timeStart+5 : timeEnd-1] - if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { - timeStr = timeStr[1 : len(timeStr)-1] - } - if format == "" { - format = time.RFC3339 - } - _, e := time.Parse(format, (string)(timeStr)) - if e != nil { - t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) - } - } - - checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") - checkTimeStr("Mon Jan _2 15:04:05 2006") - checkTimeStr("") -} - -// TODO add tests for sorting etc., this requires a parser for the text -// formatter output. 
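
As an aside (not part of the vendored files being removed), a hedged usage sketch of the `TextFormatter` options these tests exercise; the field names come from `text_formatter.go` above, while the concrete values are only illustrative:

```go
package main

import (
	"time"

	log "github.com/Sirupsen/logrus"
)

func main() {
	// Every field below exists on TextFormatter in the vendored source;
	// the particular settings are examples, not recommendations.
	log.SetFormatter(&log.TextFormatter{
		ForceColors:     true,             // color output even without a TTY
		FullTimestamp:   true,             // full timestamp instead of seconds since start
		TimestampFormat: time.RFC3339Nano, // any time.Format layout, as in TestTimestampFormat
		DisableSorting:  false,            // keep keys sorted for stable output
	})

	log.WithField("component", "example").Info("text formatter configured")
}
```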
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 1e30b1c75..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go deleted file mode 100644 index b854227e8..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile.go +++ /dev/null @@ -1,62 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "os" - "regexp" - "strings" -) - -var ( - // EnvironmentVariableRegexp A regexp to validate correct environment variables - // Environment variables set by the user must have a name consisting solely of - // alphabetics, numerics, and underscores - the first of which must not be numeric. - EnvironmentVariableRegexp = regexp.MustCompile("^[[:alpha:]_][[:alpha:][:digit:]_]*$") -) - -// ParseEnvFile Read in a line delimited file with environment variables enumerated -func ParseEnvFile(filename string) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - for scanner.Scan() { - line := scanner.Text() - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") { - data := strings.SplitN(line, "=", 2) - - // trim the front of a variable, but nothing else - variable := strings.TrimLeft(data[0], whiteSpaces) - - if !EnvironmentVariableRegexp.MatchString(variable) { - return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", variable)} - } - if len(data) > 1 { - - // pass the value through, no trimming - lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) - } else { - // if only a pass-through variable is given, clean it up. 
- lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) - } - } - } - return lines, scanner.Err() -} - -var whiteSpaces = " \t" - -// ErrBadEnvVariable typed error for bad environment variable -type ErrBadEnvVariable struct { - msg string -} - -func (e ErrBadEnvVariable) Error() string { - return fmt.Sprintf("poorly formatted environment: %s", e.msg) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go deleted file mode 100644 index cd0ca8f32..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/envfile_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "reflect" - "strings" - "testing" -) - -func tmpFileWithContent(content string, t *testing.T) string { - tmpFile, err := ioutil.TempFile("", "envfile-test") - if err != nil { - t.Fatal(err) - } - defer tmpFile.Close() - - tmpFile.WriteString(content) - return tmpFile.Name() -} - -// Test ParseEnvFile for a file with a few well formatted lines -func TestParseEnvFileGoodFile(t *testing.T) { - content := `foo=bar - baz=quux -# comment - -_foobar=foobaz -` - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - expectedLines := []string{ - "foo=bar", - "baz=quux", - "_foobar=foobaz", - } - - if !reflect.DeepEqual(lines, expectedLines) { - t.Fatal("lines not equal to expected_lines") - } -} - -// Test ParseEnvFile for an empty file -func TestParseEnvFileEmptyFile(t *testing.T) { - tmpFile := tmpFileWithContent("", t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - if len(lines) != 0 { - t.Fatal("lines not empty; expected empty") - } -} - -// Test ParseEnvFile for a non existent file -func TestParseEnvFileNonExistentFile(t *testing.T) { - _, err := ParseEnvFile("foo_bar_baz") - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } - if _, ok := err.(*os.PathError); !ok { - t.Fatalf("Expected a PathError, got [%v]", err) - } -} - -// Test ParseEnvFile for a badly formatted file -func TestParseEnvFileBadlyFormattedFile(t *testing.T) { - content := `foo=bar - f =quux -` - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatalf("Expected a ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected a ErrBadEnvVariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'f ' is not a valid environment variable" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} - -// Test ParseEnvFile for a file with a line exeeding bufio.MaxScanTokenSize -func TestParseEnvFileLineTooLongFile(t *testing.T) { - content := strings.Repeat("a", bufio.MaxScanTokenSize+42) - content = fmt.Sprint("foo=", content) - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } -} - -// ParseEnvFile with a random file, pass through -func TestParseEnvFileRandomFile(t *testing.T) { - content := `first line -another invalid line` - tmpFile := 
tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - - if err == nil { - t.Fatalf("Expected a ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected a ErrBadEnvvariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'first line' is not a valid environment variable" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go deleted file mode 100644 index a29335e60..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package opts - -import "fmt" - -var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go deleted file mode 100644 index 55eac2aac..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build windows - -package opts - -import "fmt" - -var DefaultHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go deleted file mode 100644 index b1f958755..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go +++ /dev/null @@ -1,35 +0,0 @@ -package opts - -import ( - "fmt" - "net" -) - -// IpOpt type that hold an IP -type IpOpt struct { - *net.IP -} - -func NewIpOpt(ref *net.IP, defaultVal string) *IpOpt { - o := &IpOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -func (o *IpOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -func (o *IpOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go deleted file mode 100644 index b6b526a57..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package opts - -import ( - "net" - "testing" -) - -func TestIpOptString(t *testing.T) { - addresses := []string{"", "0.0.0.0"} - var ip net.IP - - for _, address := range addresses { - stringAddress := NewIpOpt(&ip, address).String() - if stringAddress != address { - t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) - } - } -} - -func TestNewIpOptInvalidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "Not an ip" - - ipOpt := NewIpOpt(&ip, defaultVal) - - expected := "127.0.0.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got 
[%v]", expected, ipOpt.String()) - } -} - -func TestNewIpOptValidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "192.168.1.1" - - ipOpt := NewIpOpt(&ip, defaultVal) - - expected := "192.168.1.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestIpOptSetInvalidVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - ipOpt := &IpOpt{IP: &ip} - - invalidIp := "invalid ip" - expectedError := "invalid ip is not an ip address" - err := ipOpt.Set(invalidIp) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go deleted file mode 100644 index aa409b99e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go +++ /dev/null @@ -1,323 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "os" - "path" - "regexp" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume" -) - -var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) - // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker -d -H tcp://:8080 - DefaultHTTPHost = "127.0.0.1" - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker -d -H tcp:// - // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter - // is not supplied. A better longer term solution would be to use a named - // pipe as the default on the Windows daemon. - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultUnixSocket Path for the unix socket. - // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" -) - -// ListOpts type that hold a list of values and a validation function. -type ListOpts struct { - values *[]string - validator ValidatorFctType -} - -// NewListOpts Create a new ListOpts with the specified validator. -func NewListOpts(validator ValidatorFctType) ListOpts { - var values []string - return *NewListOptsRef(&values, validator) -} - -func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { - return &ListOpts{ - values: values, - validator: validator, - } -} - -func (opts *ListOpts) String() string { - return fmt.Sprintf("%v", []string((*opts.values))) -} - -// Set validates if needed the input value and add it to the -// internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - (*opts.values) = append((*opts.values), value) - return nil -} - -// Delete remove the given element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range *opts.values { - if k == key { - (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. -// FIXME: can we remove this? 
-func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range *opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values' slice. -// FIXME: Can we remove this? -func (opts *ListOpts) GetAll() []string { - return (*opts.values) -} - -// Get checks the existence of the given key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range *opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the amount of element in the slice. -func (opts *ListOpts) Len() int { - return len((*opts.values)) -} - -//MapOpts type that holds a map of values and a validation function. -type MapOpts struct { - values map[string]string - validator ValidatorFctType -} - -// Set validates if needed the input value and add it to the -// internal map, by splitting on '='. -func (opts *MapOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - vals := strings.SplitN(value, "=", 2) - if len(vals) == 1 { - (opts.values)[vals[0]] = "" - } else { - (opts.values)[vals[0]] = vals[1] - } - return nil -} - -func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", map[string]string((opts.values))) -} - -func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { - if values == nil { - values = make(map[string]string) - } - return &MapOpts{ - values: values, - validator: validator, - } -} - -// ValidatorFctType validator that return a validate string and/or an error -type ValidatorFctType func(val string) (string, error) - -// ValidatorFctListType validator that return a validate list of string and/or an error -type ValidatorFctListType func(val string) ([]string, error) - -// ValidateAttach Validates that the specified string is a valid attach option. -func ValidateAttach(val string) (string, error) { - s := strings.ToLower(val) - for _, str := range []string{"stdin", "stdout", "stderr"} { - if s == str { - return s, nil - } - } - return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") -} - -// ValidateLink Validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - if _, _, err := parsers.ParseLink(val); err != nil { - return val, err - } - return val, nil -} - -// ValidateDevice Validate a path for devices -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:mode] -func ValidateDevice(val string) (string, error) { - return validatePath(val, false) -} - -// ValidatePath Validate a path for volumes -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:rw|ro] -// It will also validate the mount mode. 
-func ValidatePath(val string) (string, error) { - return validatePath(val, true) -} - -func validatePath(val string, validateMountMode bool) (string, error) { - var containerPath string - var mode string - - if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for volumes: %s", val) - } - - splited := strings.SplitN(val, ":", 3) - if splited[0] == "" { - return val, fmt.Errorf("bad format for volumes: %s", val) - } - switch len(splited) { - case 1: - containerPath = splited[0] - val = path.Clean(containerPath) - case 2: - if isValid, _ := volume.ValidateMountMode(splited[1]); validateMountMode && isValid { - containerPath = splited[0] - mode = splited[1] - val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) - } else { - containerPath = splited[1] - val = fmt.Sprintf("%s:%s", splited[0], path.Clean(containerPath)) - } - case 3: - containerPath = splited[1] - mode = splited[2] - if isValid, _ := volume.ValidateMountMode(splited[2]); validateMountMode && !isValid { - return val, fmt.Errorf("bad mount mode specified : %s", mode) - } - val = fmt.Sprintf("%s:%s:%s", splited[0], containerPath, mode) - } - - if !path.IsAbs(containerPath) { - return val, fmt.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - -// ValidateEnv Validate an environment variable and returns it -// It will use EnvironmentVariableRegexp to ensure the name of the environment variable is valid. -// If no value is specified, it returns the current value using os.Getenv. -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if len(arr) > 1 { - return val, nil - } - if !EnvironmentVariableRegexp.MatchString(arr[0]) { - return val, ErrBadEnvVariable{fmt.Sprintf("variable '%s' is not a valid environment variable", val)} - } - if !doesEnvExist(val) { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -// ValidateIPAddress Validates an Ip address -func ValidateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateMACAddress Validates a MAC address -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} - -// ValidateDNSSearch Validates domain for resolvconf search configuration. -// A zero length domain is represented by . -func ValidateDNSSearch(val string) (string, error) { - if val = strings.Trim(val, " "); val == "." 
{ - return val, nil - } - return validateDomain(val) -} - -func validateDomain(val string) (string, error) { - if alphaRegexp.FindString(val) == "" { - return "", fmt.Errorf("%s is not a valid domain", val) - } - ns := domainRegexp.FindSubmatch([]byte(val)) - if len(ns) > 0 && len(ns[1]) < 255 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not a valid domain", val) -} - -// ValidateExtraHost Validate that the given string is a valid extrahost and returns it -// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6) -func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} - -// ValidateLabel Validate that the given string is a valid label, and returns it -// Labels are in the form on key=value -func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("bad attribute format: %s", val) - } - return val, nil -} - -// ValidateHost Validate that the given string is a valid host and returns it -func ValidateHost(val string) (string, error) { - host, err := parsers.ParseHost(DefaultHTTPHost, DefaultUnixSocket, val) - if err != nil { - return val, err - } - return host, nil -} - -func doesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if parts[0] == name { - return true - } - } - return false -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go deleted file mode 100644 index f08df30be..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_test.go +++ /dev/null @@ -1,479 +0,0 @@ -package opts - -import ( - "fmt" - "os" - "strings" - "testing" -) - -func TestValidateIPAddress(t *testing.T) { - if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { - t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { - t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) - } - -} - -func TestMapOpts(t *testing.T) { - tmpMap := make(map[string]string) - o := NewMapOpts(tmpMap, logOptsValidator) - o.Set("max-size=1") - if o.String() != "map[max-size:1]" { - t.Errorf("%s != [map[max-size:1]", o.String()) - } - - o.Set("max-file=2") - if len(tmpMap) != 2 { - t.Errorf("map length %d != 2", len(tmpMap)) - } - - if tmpMap["max-file"] != "2" { - t.Errorf("max-file = %s != 2", tmpMap["max-file"]) - } - - if tmpMap["max-size"] != "1" { - t.Errorf("max-size = %s != 1", tmpMap["max-size"]) - } - if o.Set("dummy-val=3") == nil { - 
t.Errorf("validator is not being called") - } -} - -func TestValidateMACAddress(t *testing.T) { - if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) - } - - if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") - } - - if _, err := ValidateMACAddress(`random invalid string`); err == nil { - t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") - } -} - -func TestListOptsWithoutValidator(t *testing.T) { - o := NewListOpts(nil) - o.Set("foo") - if o.String() != "[foo]" { - t.Errorf("%s != [foo]", o.String()) - } - o.Set("bar") - if o.Len() != 2 { - t.Errorf("%d != 2", o.Len()) - } - o.Set("bar") - if o.Len() != 3 { - t.Errorf("%d != 3", o.Len()) - } - if !o.Get("bar") { - t.Error("o.Get(\"bar\") == false") - } - if o.Get("baz") { - t.Error("o.Get(\"baz\") == true") - } - o.Delete("foo") - if o.String() != "[bar bar]" { - t.Errorf("%s != [bar bar]", o.String()) - } - listOpts := o.GetAll() - if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { - t.Errorf("Expected [[bar bar]], got [%v]", listOpts) - } - mapListOpts := o.GetMap() - if len(mapListOpts) != 1 { - t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) - } - -} - -func TestListOptsWithValidator(t *testing.T) { - // Re-using logOptsvalidator (used by MapOpts) - o := NewListOpts(logOptsValidator) - o.Set("foo") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } - o.Set("foo=bar") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } - o.Set("max-file=2") - if o.Len() != 1 { - t.Errorf("%d != 1", o.Len()) - } - if !o.Get("max-file=2") { - t.Error("o.Get(\"max-file=2\") == false") - } - if o.Get("baz") { - t.Error("o.Get(\"baz\") == true") - } - o.Delete("max-file=2") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } -} - -func TestValidateDNSSearch(t *testing.T) { - valid := []string{ - `.`, - `a`, - `a.`, - `1.foo`, - `17.foo`, - `foo.bar`, - `foo.bar.baz`, - `foo.bar.`, - `foo.bar.baz`, - `foo1.bar2`, - `foo1.bar2.baz`, - `1foo.2bar.`, - `1foo.2bar.baz`, - `foo-1.bar-2`, - `foo-1.bar-2.baz`, - `foo-1.bar-2.`, - `foo-1.bar-2.baz`, - `1-foo.2-bar`, - `1-foo.2-bar.baz`, - `1-foo.2-bar.`, - `1-foo.2-bar.baz`, - } - - invalid := []string{ - ``, - ` `, - ` `, - `17`, - `17.`, - `.17`, - `17-.`, - `17-.foo`, - `.foo`, - `foo-.bar`, - `-foo.bar`, - `foo.bar-`, - `foo.bar-.baz`, - `foo.-bar`, - `foo.-bar.baz`, - `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, - } - - for _, domain := range valid { - if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { - t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) - } - } - - for _, domain := range invalid { - if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { - t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) - } - } -} - -func TestValidateExtraHosts(t *testing.T) { - valid := []string{ - `myhost:192.168.0.1`, - `thathost:10.0.2.1`, - `anipv6host:2003:ab34:e::1`, - `ipv6local:::1`, - } - - invalid := map[string]string{ - `myhost:192.notanipaddress.1`: `invalid IP`, - `thathost-nosemicolon10.0.0.1`: `bad format`, - `anipv6host:::::1`: 
`invalid IP`, - `ipv6local:::0::`: `invalid IP`, - } - - for _, extrahost := range valid { - if _, err := ValidateExtraHost(extrahost); err != nil { - t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) - } - } - - for extraHost, expectedError := range invalid { - if _, err := ValidateExtraHost(extraHost); err == nil { - t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) - } - } - } -} - -func TestValidateAttach(t *testing.T) { - valid := []string{ - "stdin", - "stdout", - "stderr", - "STDIN", - "STDOUT", - "STDERR", - } - if _, err := ValidateAttach("invalid"); err == nil { - t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") - } - - for _, attach := range valid { - value, err := ValidateAttach(attach) - if err != nil { - t.Fatal(err) - } - if value != strings.ToLower(attach) { - t.Fatalf("Expected [%v], got [%v]", attach, value) - } - } -} - -func TestValidateLink(t *testing.T) { - valid := []string{ - "name", - "dcdfbe62ecd0:alias", - "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", - "angry_torvalds:linus", - } - invalid := map[string]string{ - "": "empty string specified for links", - "too:much:of:it": "bad format for links: too:much:of:it", - } - - for _, link := range valid { - if _, err := ValidateLink(link); err != nil { - t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) - } - } - - for link, expectedError := range invalid { - if _, err := ValidateLink(link); err == nil { - t.Fatalf("ValidateLink(`%q`) should have failed validation", link) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) - } - } - } -} - -func TestValidatePath(t *testing.T) { - valid := []string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:ro", - "/hostPath:/containerPath:rw", - "/rw:/ro", - "/path:rw", - "/path:ro", - "/rw:rw", - } - invalid := map[string]string{ - "": "bad format for volumes: ", - "./": "./ is not an absolute path", - "../": "../ is not an absolute path", - "/:../": "../ is not an absolute path", - "/:path": "path is not an absolute path", - ":": "bad format for volumes: :", - "/tmp:": " is not an absolute path", - ":test": "bad format for volumes: :test", - ":/test": "bad format for volumes: :/test", - "tmp:": " is not an absolute path", - ":test:": "bad format for volumes: :test:", - "::": "bad format for volumes: ::", - ":::": "bad format for volumes: :::", - "/tmp:::": "bad format for volumes: /tmp:::", - ":/tmp::": "bad format for volumes: :/tmp::", - "path:ro": "path is not an absolute path", - "/path:/path:sw": "bad mount mode specified : sw", - "/path:/path:rwz": "bad mount mode specified : rwz", - } - - for _, path := range valid { - if _, err := ValidatePath(path); err != nil { - t.Fatalf("ValidatePath(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ValidatePath(path); err == nil { - t.Fatalf("ValidatePath(`%q`) should have failed validation", path) - } else { - if err.Error() != expectedError { - t.Fatalf("ValidatePath(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) - } - } - } -} -func TestValidateDevice(t *testing.T) { - 
valid := []string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:ro", - "/hostPath:/containerPath:rw", - "/hostPath:/containerPath:mrw", - } - invalid := map[string]string{ - "": "bad format for volumes: ", - "./": "./ is not an absolute path", - "../": "../ is not an absolute path", - "/:../": "../ is not an absolute path", - "/:path": "path is not an absolute path", - ":": "bad format for volumes: :", - "/tmp:": " is not an absolute path", - ":test": "bad format for volumes: :test", - ":/test": "bad format for volumes: :/test", - "tmp:": " is not an absolute path", - ":test:": "bad format for volumes: :test:", - "::": "bad format for volumes: ::", - ":::": "bad format for volumes: :::", - "/tmp:::": "bad format for volumes: /tmp:::", - ":/tmp::": "bad format for volumes: :/tmp::", - "path:ro": "ro is not an absolute path", - } - - for _, path := range valid { - if _, err := ValidateDevice(path); err != nil { - t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ValidateDevice(path); err == nil { - t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) - } else { - if err.Error() != expectedError { - t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) - } - } - } -} - -func TestValidateEnv(t *testing.T) { - invalids := map[string]string{ - "some spaces": "poorly formatted environment: variable 'some spaces' is not a valid environment variable", - "asd!qwe": "poorly formatted environment: variable 'asd!qwe' is not a valid environment variable", - "1asd": "poorly formatted environment: variable '1asd' is not a valid environment variable", - "123": "poorly formatted environment: variable '123' is not a valid environment variable", - } - valids := map[string]string{ - "a": "a", - "something": "something", - "_=a": "_=a", - "env1=value1": "env1=value1", - "_env1=value1": "_env1=value1", - "env2=value2=value3": "env2=value2=value3", - "env3=abc!qwe": "env3=abc!qwe", - "env_4=value 4": "env_4=value 4", - "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), - "PATH=something": "PATH=something", - } - for value, expectedError := range invalids { - _, err := ValidateEnv(value) - if err == nil { - t.Fatalf("Expected ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected ErrBadEnvVariable, got [%s]", err) - } - if err.Error() != expectedError { - t.Fatalf("Expected ErrBadEnvVariable with message [%s], got [%s]", expectedError, err.Error()) - } - } - for value, expected := range valids { - actual, err := ValidateEnv(value) - if err != nil { - t.Fatal(err) - } - if actual != expected { - t.Fatalf("Expected [%v], got [%v]", expected, actual) - } - } -} - -func TestValidateLabel(t *testing.T) { - if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { - t.Fatalf("Expected an error [bad attribute format: label], go %v", err) - } - if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { - t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) - } - // Validate it's working with more than one = - if actual, err := ValidateLabel("key1=value1=value2"); err != nil { - t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) - } - // Validate it's working with one more - if actual, err := 
ValidateLabel("key1=value1=value2=value3"); err != nil { - t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) - } -} - -func TestValidateHost(t *testing.T) { - invalid := map[string]string{ - "anything": "Invalid bind address format: anything", - "something with spaces": "Invalid bind address format: something with spaces", - "://": "Invalid bind address format: ://", - "unknown://": "Invalid bind address format: unknown://", - "tcp://": "Invalid proto, expected tcp: ", - "tcp://:port": "Invalid bind address format: :port", - "tcp://invalid": "Invalid bind address format: invalid", - "tcp://invalid:port": "Invalid bind address format: invalid:port", - } - valid := map[string]string{ - "fd://": "fd://", - "fd://something": "fd://something", - "tcp://:2375": "tcp://127.0.0.1:2375", // default ip address - "tcp://:2376": "tcp://127.0.0.1:2376", // default ip address - "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", - "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", - "tcp://192.168:8080": "tcp://192.168:8080", - "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P - "tcp://docker.com:2375": "tcp://docker.com:2375", - "unix://": "unix:///var/run/docker.sock", // default unix:// value - "unix://path/to/socket": "unix://path/to/socket", - } - - for value, errorMessage := range invalid { - if _, err := ValidateHost(value); err == nil || err.Error() != errorMessage { - t.Fatalf("Expected an error for %v with [%v], got [%v]", value, errorMessage, err) - } - } - for value, expected := range valid { - if actual, err := ValidateHost(value); err != nil || actual != expected { - t.Fatalf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) - } - } -} - -func logOptsValidator(val string) (string, error) { - allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} - vals := strings.Split(val, "=") - if allowedKeys[vals[0]] != "" { - return val, nil - } - return "", fmt.Errorf("invalid key %s", vals[0]) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go deleted file mode 100644 index 54f6c4e3f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go +++ /dev/null @@ -1,47 +0,0 @@ -package opts - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit" -) - -type UlimitOpt struct { - values *map[string]*ulimit.Ulimit -} - -func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*ulimit.Ulimit{} - } - return &UlimitOpt{ref} -} - -func (o *UlimitOpt) Set(val string) error { - l, err := ulimit.Parse(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -func (o *UlimitOpt) GetList() []*ulimit.Ulimit { - var ulimits []*ulimit.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go deleted file mode 100644 index ad284e754..000000000 --- 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts - -import ( - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit" -) - -func TestUlimitOpt(t *testing.T) { - ulimitMap := map[string]*ulimit.Ulimit{ - "nofile": {"nofile", 1024, 512}, - } - - ulimitOpt := NewUlimitOpt(&ulimitMap) - - expected := "[nofile=512:1024]" - if ulimitOpt.String() != expected { - t.Fatalf("Expected %v, got %v", expected, ulimitOpt) - } - - // Valid ulimit append to opts - if err := ulimitOpt.Set("core=1024:1024"); err != nil { - t.Fatal(err) - } - - // Invalid ulimit type returns an error and do not append to opts - if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { - t.Fatalf("Expected error on invalid ulimit type") - } - expected = "[nofile=512:1024 core=1024:1024]" - expected2 := "[core=1024:1024 nofile=512:1024]" - result := ulimitOpt.String() - if result != expected && result != expected2 { - t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) - } - - // And test GetList - ulimits := ulimitOpt.GetList() - if len(ulimits) != 2 { - t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md deleted file mode 100644 index 7307d9694..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/README.md +++ /dev/null @@ -1 +0,0 @@ -This code provides helper functions for dealing with archive files. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go deleted file mode 100644 index 7306840b6..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go +++ /dev/null @@ -1,902 +0,0 @@ -package archive - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "syscall" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -type ( - Archive io.ReadCloser - ArchiveReader io.Reader - Compression int - TarChownOptions struct { - UID, GID int - } - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - ChownOpts *TarChownOptions - Name string - IncludeSourceDir bool - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - } - - // Archiver allows the reuse of most utility functions of this package - // with a pluggable Untar function. 
- Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - } - - // breakoutError is used to differentiate errors related to breaking out - // When testing archive breakout in the unit tests, this error is expected - // in order for the test to pass. - breakoutError error -) - -var ( - ErrNotImplemented = errors.New("Function not implemented") - defaultArchiver = &Archiver{Untar} -) - -const ( - Uncompressed Compression = iota - Bzip2 - Gzip - Xz -) - -func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debugf("Len too short") - continue - } - if bytes.Compare(m, source[:len(m)]) == 0 { - return compression - } - } - return Uncompressed -} - -func xzDecompress(archive io.Reader) (io.ReadCloser, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return CmdStream(exec.Command(args[0], args[1:]...), archive) -} - -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil { - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - gzReader, err := gzip.NewReader(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return readBufWrapper, nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - xzReader, err := xzDecompress(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return readBufWrapper, nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string -} - -// canonicalTarName provides a platform-independent and consistent posix-style -//path for files and directories to be archived regardless of the platform. 
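For context on the compression helpers just shown (DetectCompression, DecompressStream, CompressStream): a minimal sketch of decompressing an arbitrary layer stream, assuming the vendored import path and a hypothetical input file:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("layer.tar.gz") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// DecompressStream sniffs the magic bytes (gzip, bzip2, xz) and returns
	// a reader yielding the uncompressed tar stream.
	rc, err := archive.DecompressStream(f)
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	n, err := io.Copy(ioutil.Discard, rc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("uncompressed size: %d bytes\n", n)
}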
-func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} - -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - link := "" - if fi.Mode()&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { - return err - } - } - - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return err - } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - - nlink, inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) - if err != nil { - return err - } - - // if it's a regular file and has more than 1 link, - // it's hardlinked, so set the type flag accordingly - if fi.Mode().IsRegular() && nlink > 1 { - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! - } else { - ta.SeenFiles[inode] = name - } - } - - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg { - file, err := os.Open(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file - file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. 
/extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debugf("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err - } - } - - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - return err - } - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - // syscall.UtimesNano doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.UtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } else { - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - - patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) - - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(compressWriter), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - } - - defer func() { - // Make sure to check the error on Close. 
- if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Debugf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Debugf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. - if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - var renamedRelFilePath string // For when tar.Options.Name is set - for _, include := range options.IncludeFiles { - // We can't use filepath.Join(srcPath, include) because this will - // clean away a trailing "." or "/" which may be important. - walkRoot := strings.Join([]string{srcPath, include}, string(filepath.Separator)) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. 
IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) - if err != nil { - logrus.Debugf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - if !exceptions && f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // TODO Windows: Verify if this needs to be os.Pathseparator - // Rename the base resource - if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) { - renamedRelFilePath = relFilePath - } - // Set this to make sure the items underneath also get renamed - if options.Name != "" { - relFilePath = strings.Replace(relFilePath, renamedRelFilePath, options.Name, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", filePath, err) - } - return nil - }) - } - }() - - return pipeReader, nil -} - -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0777) - if err != nil { - return err - } - } - } - - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. - return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. 
- return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." { - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr) - - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - if err := syscall.UtimesNano(path, ts); err != nil { - return err - } - } - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - var r io.Reader = tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - return archiver.Untar(archive, dst, nil) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return defaultArchiver.TarUntar(src, dst) -} - -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - if err := archiver.Untar(archive, dst, nil); err != nil { - return err - } - return nil -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. 
-func UntarPath(src, dst string) error { - return defaultArchiver.UntarPath(src, dst) -} - -func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) - if err := system.MkdirAll(dst, 0755); err != nil && !os.IsExist(err) { - return err - } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) - return archiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return defaultArchiver.CopyWithTar(src, dst) -} - -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. - if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil && !os.IsExist(err) { - return err - } - - r, w := io.Pipe() - errC := promise.Go(func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }) - defer func() { - if er := <-errC; err != nil { - err = er - } - }() - return archiver.Untar(r, filepath.Dir(dst), nil) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// Destination handling is in an operating specific manner depending -// where the daemon is running. If `dst` ends with a trailing slash -// the final destination path will be `dst/base(src)` (Linux) or -// `dst\base(src)` (Windows). -func CopyFileWithTar(src, dst string) (err error) { - return defaultArchiver.CopyFileWithTar(src, dst) -} - -// CmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. 
-func CmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { - if input != nil { - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, err - } - // Write stdin if any - go func() { - io.Copy(stdin, input) - stdin.Close() - }() - } - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, err - } - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, err - } - pipeR, pipeW := io.Pipe() - errChan := make(chan []byte) - // Collect stderr, we will use it in case of an error - go func() { - errText, e := ioutil.ReadAll(stderr) - if e != nil { - errText = []byte("(...couldn't fetch stderr: " + e.Error() + ")") - } - errChan <- errText - }() - // Copy stdout to the returned pipe - go func() { - _, err := io.Copy(pipeW, stdout) - if err != nil { - pipeW.CloseWithError(err) - } - errText := <-errChan - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errText)) - } else { - pipeW.Close() - } - }() - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, err - } - return pipeR, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src Archive, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. 
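And for CmdStream, defined just above: it starts the command, streams its stdout, and folds any stderr output into the returned error when the command fails. A small sketch, again assuming the vendored import path:

package main

import (
	"fmt"
	"io/ioutil"
	"os/exec"

	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
)

func main() {
	// On success the returned ReadCloser carries stdout; on a non-zero exit
	// the pipe is closed with an error that includes the captured stderr.
	out, err := archive.CmdStream(exec.Command("echo", "hello"), nil)
	if err != nil {
		panic(err)
	}
	data, err := ioutil.ReadAll(out)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", data)
}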
-func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go deleted file mode 100644 index 4bb4f6ff5..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_test.go +++ /dev/null @@ -1,1204 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "syscall" - "testing" - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -func TestIsArchiveNilHeader(t *testing.T) { - out := IsArchive(nil) - if out { - t.Fatalf("isArchive should return false as nil is not a valid archive header") - } -} - -func TestIsArchiveInvalidHeader(t *testing.T) { - header := []byte{0x00, 0x01, 0x02} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is not a valid archive header", header) - } -} - -func TestIsArchiveBzip2(t *testing.T) { - header := []byte{0x42, 0x5A, 0x68} - out := IsArchive(header) - if !out { - t.Fatalf("isArchive should return true as %s is a bz2 header", header) - } -} - -func TestIsArchive7zip(t *testing.T) { - header := []byte{0x50, 0x4b, 0x03, 0x04} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) - } -} - -func TestDecompressStreamGzip(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - archive, err := os.Open("/tmp/archive.gz") - _, err = DecompressStream(archive) - if err != nil { - t.Fatalf("Failed to decompress a gzip file.") - } -} - -func TestDecompressStreamBzip2(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - archive, err := os.Open("/tmp/archive.bz2") - _, err = DecompressStream(archive) - if err != nil { - t.Fatalf("Failed to decompress a bzip2 file.") - } -} - -func TestDecompressStreamXz(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "touch /tmp/archive && xz -f /tmp/archive") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - archive, err := os.Open("/tmp/archive.xz") - _, err = DecompressStream(archive) - if err != nil { - t.Fatalf("Failed to decompress a xz file.") - } -} - -func TestCompressStreamXzUnsuported(t *testing.T) { - dest, err := os.Create("/tmp/dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - _, err = CompressStream(dest, Xz) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestCompressStreamBzip2Unsupported(t *testing.T) { - dest, err 
:= os.Create("/tmp/dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - _, err = CompressStream(dest, Xz) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestCompressStreamInvalid(t *testing.T) { - dest, err := os.Create("/tmp/dest") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - _, err = CompressStream(dest, -1) - if err == nil { - t.Fatalf("Should fail as xz is unsupported for compression format.") - } -} - -func TestExtensionInvalid(t *testing.T) { - compression := Compression(-1) - output := compression.Extension() - if output != "" { - t.Fatalf("The extension of an invalid compression should be an empty string.") - } -} - -func TestExtensionUncompressed(t *testing.T) { - compression := Uncompressed - output := compression.Extension() - if output != "tar" { - t.Fatalf("The extension of a uncompressed archive should be 'tar'.") - } -} -func TestExtensionBzip2(t *testing.T) { - compression := Bzip2 - output := compression.Extension() - if output != "tar.bz2" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") - } -} -func TestExtensionGzip(t *testing.T) { - compression := Gzip - output := compression.Extension() - if output != "tar.gz" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.gz'") - } -} -func TestExtensionXz(t *testing.T) { - compression := Xz - output := compression.Extension() - if output != "tar.xz" { - t.Fatalf("The extension of a bzip2 archive should be 'tar.xz'") - } -} - -func TestCmdStreamLargeStderr(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") - out, err := CmdStream(cmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - errCh := make(chan error) - go func() { - _, err := io.Copy(ioutil.Discard, out) - errCh <- err - }() - select { - case err := <-errCh: - if err != nil { - t.Fatalf("Command should not have failed (err=%.100s...)", err) - } - case <-time.After(5 * time.Second): - t.Fatalf("Command did not complete in 5 seconds; probable deadlock") - } -} - -func TestCmdStreamBad(t *testing.T) { - badCmd := exec.Command("/bin/sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") - out, err := CmdStream(badCmd, nil) - if err != nil { - t.Fatalf("Failed to start command: %s", err) - } - if output, err := ioutil.ReadAll(out); err == nil { - t.Fatalf("Command should have failed") - } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { - t.Fatalf("Wrong error value (%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func TestCmdStreamGood(t *testing.T) { - cmd := exec.Command("/bin/sh", "-c", "echo hello; exit 0") - out, err := CmdStream(cmd, nil) - if err != nil { - t.Fatal(err) - } - if output, err := ioutil.ReadAll(out); err != nil { - t.Fatalf("Command should not have failed (err=%s)", err) - } else if s := string(output); s != "hello\n" { - t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) - } -} - -func TestUntarPathWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - invalidDestFolder := path.Join(tempFolder, "invalidDest") - // Create a src file - srcFile := path.Join(tempFolder, "src") - _, err = os.Create(srcFile) - if err != nil { - 
t.Fatalf("Fail to create the source file") - } - err = UntarPath(srcFile, invalidDestFolder) - if err == nil { - t.Fatalf("UntarPath with invalid destination path should throw an error.") - } -} - -func TestUntarPathWithInvalidSrc(t *testing.T) { - dest, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatalf("Fail to create the destination file") - } - defer os.RemoveAll(dest) - err = UntarPath("/invalid/path", dest) - if err == nil { - t.Fatalf("UntarPath with invalid src path should throw an error.") - } -} - -func TestUntarPath(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := path.Join(tmpFolder, "src") - tarFile := path.Join(tmpFolder, "src.tar") - os.Create(path.Join(tmpFolder, "src")) - cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - destFolder := path.Join(tmpFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatalf("Fail to create the destination file") - } - err = UntarPath(tarFile, destFolder) - if err != nil { - t.Fatalf("UntarPath shouldn't throw an error, %s.", err) - } - expectedFile := path.Join(destFolder, srcFile) - _, err = os.Stat(expectedFile) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -// Do the same test as above but with the destination as file, it should fail -func TestUntarPathWithDestinationFile(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := path.Join(tmpFolder, "src") - tarFile := path.Join(tmpFolder, "src.tar") - os.Create(path.Join(tmpFolder, "src")) - cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - destFile := path.Join(tmpFolder, "dest") - _, err = os.Create(destFile) - if err != nil { - t.Fatalf("Fail to create the destination file") - } - err = UntarPath(tarFile, destFile) - if err == nil { - t.Fatalf("UntarPath should throw an error if the destination if a file") - } -} - -// Do the same test as above but with the destination folder already exists -// and the destination file is a directory -// It's working, see https://github.com/docker/docker/issues/10040 -func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { - tmpFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpFolder) - srcFile := path.Join(tmpFolder, "src") - tarFile := path.Join(tmpFolder, "src.tar") - os.Create(srcFile) - cmd := exec.Command("/bin/sh", "-c", "tar cf "+tarFile+" "+srcFile) - _, err = cmd.CombinedOutput() - if err != nil { - t.Fatal(err) - } - destFolder := path.Join(tmpFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatalf("Fail to create the destination folder") - } - // Let's create a folder that will has the same path as the extracted file (from tar) - destSrcFileAsFolder := path.Join(destFolder, srcFile) - err = os.MkdirAll(destSrcFileAsFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = UntarPath(tarFile, destFolder) - if err != nil { - t.Fatalf("UntarPath should throw not throw an error if the extracted file already exists and is a folder") - } -} - -func TestCopyWithTarInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - 
if err != nil { - t.Fatal(nil) - } - destFolder := path.Join(tempFolder, "dest") - invalidSrc := path.Join(tempFolder, "doesnotexists") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = CopyWithTar(invalidSrc, destFolder) - if err == nil { - t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") - } -} - -func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - srcFolder := path.Join(tempFolder, "src") - inexistentDestFolder := path.Join(tempFolder, "doesnotexists") - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = CopyWithTar(srcFolder, inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") - } - _, err = os.Stat(inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder should create it.") - } -} - -// Test CopyWithTar with a file as src -func TestCopyWithTarSrcFile(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - srcFolder := path.Join(folder, "src") - src := path.Join(folder, path.Join("src", "src")) - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest) - if err != nil { - t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - // FIXME Check the content - if err != nil { - t.Fatalf("Destination file should be the same as the source.") - } -} - -// Test CopyWithTar with a folder as src -func TestCopyWithTarSrcFolder(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - src := path.Join(folder, path.Join("src", "folder")) - err = os.MkdirAll(src, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(path.Join(src, "file"), []byte("content"), 0777) - err = CopyWithTar(src, dest) - if err != nil { - t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - // FIXME Check the content (the file inside) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -func TestCopyFileWithTarInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - destFolder := path.Join(tempFolder, "dest") - err = os.MkdirAll(destFolder, 0740) - if err != nil { - t.Fatal(err) - } - invalidFile := path.Join(tempFolder, "doesnotexists") - err = CopyFileWithTar(invalidFile, destFolder) - if err == nil { - t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") - } -} - -func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(nil) - } - defer os.RemoveAll(tempFolder) - srcFile := path.Join(tempFolder, "src") - inexistentDestFolder := path.Join(tempFolder, "doesnotexists") - _, err = os.Create(srcFile) - if err != nil { - t.Fatal(err) - } - err = CopyFileWithTar(srcFile, inexistentDestFolder) - if err 
!= nil { - t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") - } - _, err = os.Stat(inexistentDestFolder) - if err != nil { - t.Fatalf("CopyWithTar with an inexistent folder should create it.") - } - // FIXME Test the src file and content -} - -func TestCopyFileWithTarSrcFolder(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - src := path.Join(folder, "srcfolder") - err = os.MkdirAll(src, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - err = CopyFileWithTar(src, dest) - if err == nil { - t.Fatalf("CopyFileWithTar should throw an error with a folder.") - } -} - -func TestCopyFileWithTarSrcFile(t *testing.T) { - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := path.Join(folder, "dest") - srcFolder := path.Join(folder, "src") - src := path.Join(folder, path.Join("src", "src")) - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - err = os.MkdirAll(dest, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest+"/") - if err != nil { - t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) - } - _, err = os.Stat(dest) - if err != nil { - t.Fatalf("Destination folder should contain the source file but did not.") - } -} - -func TestTarFiles(t *testing.T) { - // try without hardlinks - if err := checkNoChanges(1000, false); err != nil { - t.Fatal(err) - } - // try with hardlinks - if err := checkNoChanges(1000, true); err != nil { - t.Fatal(err) - } -} - -func checkNoChanges(fileNum int, hardlinks bool) error { - srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - if err != nil { - return err - } - defer os.RemoveAll(srcDir) - - destDir, err := ioutil.TempDir("", "docker-test-destDir") - if err != nil { - return err - } - defer os.RemoveAll(destDir) - - _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) - if err != nil { - return err - } - - err = TarUntar(srcDir, destDir) - if err != nil { - return err - } - - changes, err := ChangesDirs(destDir, srcDir) - if err != nil { - return err - } - if len(changes) > 0 { - return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) - } - return nil -} - -func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { - archive, err := TarWithOptions(origin, options) - if err != nil { - t.Fatal(err) - } - defer archive.Close() - - buf := make([]byte, 10) - if _, err := archive.Read(buf); err != nil { - return nil, err - } - wrap := io.MultiReader(bytes.NewReader(buf), archive) - - detectedCompression := DetectCompression(buf) - compression := options.Compression - if detectedCompression.Extension() != compression.Extension() { - return nil, fmt.Errorf("Wrong compression detected. 
Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) - } - - tmp, err := ioutil.TempDir("", "docker-test-untar") - if err != nil { - return nil, err - } - defer os.RemoveAll(tmp) - if err := Untar(wrap, tmp, nil); err != nil { - return nil, err - } - if _, err := os.Stat(tmp); err != nil { - return nil, err - } - - return ChangesDirs(origin, tmp) -} - -func TestTarUntar(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { - t.Fatal(err) - } - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - changes, err := tarUntar(t, origin, &TarOptions{ - Compression: c, - ExcludePatterns: []string{"3"}, - }) - - if err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - - if len(changes) != 1 || changes[0].Path != "/3" { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - } -} - -func TestTarUntarWithXattr(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { - t.Fatal(err) - } - if err := system.Lsetxattr(path.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil { - t.Fatal(err) - } - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - changes, err := tarUntar(t, origin, &TarOptions{ - Compression: c, - ExcludePatterns: []string{"3"}, - }) - - if err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - - if len(changes) != 1 || changes[0].Path != "/3" { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - capability, _ := system.Lgetxattr(path.Join(origin, "2"), "security.capability") - if capability == nil && capability[0] != 0x00 { - t.Fatalf("Untar should have kept the 'security.capability' xattr.") - } - } -} - -func TestTarWithOptions(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - if _, err := ioutil.TempDir(origin, "folder"); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - - cases := []struct { - opts *TarOptions - numChanges int - }{ - {&TarOptions{IncludeFiles: []string{"1"}}, 2}, - {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, - {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, - {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, - {&TarOptions{Name: "test", IncludeFiles: []string{"1"}}, 4}, - } - for _, testCase := range cases { - changes, err := tarUntar(t, origin, testCase.opts) - if err != nil { - t.Fatalf("Error tar/untar when testing 
inclusion/exclusion: %s", err) - } - if len(changes) != testCase.numChanges { - t.Errorf("Expected %d changes, got %d for %+v:", - testCase.numChanges, len(changes), testCase.opts) - } - } -} - -// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz -// use PAX Global Extended Headers. -// Failing prevents the archives from being uncompressed during ADD -func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { - hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil) - if err != nil { - t.Fatal(err) - } -} - -// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. -// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. -func TestUntarUstarGnuConflict(t *testing.T) { - f, err := os.Open("testdata/broken.tar") - if err != nil { - t.Fatal(err) - } - found := false - tr := tar.NewReader(f) - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - t.Fatal(err) - } - if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { - found = true - break - } - } - if !found { - t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") - } -} - -func TestTarWithBlockCharFifo(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := system.Mknod(path.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - if err := system.Mknod(path.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - if err := system.Mknod(path.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil { - t.Fatal(err) - } - - dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - - // we'll do this in two steps to separate failure - fh, err := Tar(origin, Uncompressed) - if err != nil { - t.Fatal(err) - } - - // ensure we can read the whole thing with no error, before writing back out - buf, err := ioutil.ReadAll(fh) - if err != nil { - t.Fatal(err) - } - - bRdr := bytes.NewReader(buf) - err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - if err != nil { - t.Fatal(err) - } - - changes, err := ChangesDirs(origin, dest) - if err != nil { - t.Fatal(err) - } - if len(changes) > 0 { - t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes) - } -} - -func TestTarWithHardLink(t *testing.T) { - origin, err := ioutil.TempDir("", "docker-test-tar-hardlink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(path.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := os.Link(path.Join(origin, "1"), path.Join(origin, "2")); err != nil { - t.Fatal(err) - } - - var i1, i2 uint64 - if 
i1, err = getNlink(path.Join(origin, "1")); err != nil { - t.Fatal(err) - } - // sanity check that we can hardlink - if i1 != 2 { - t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1) - } - - dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - - // we'll do this in two steps to separate failure - fh, err := Tar(origin, Uncompressed) - if err != nil { - t.Fatal(err) - } - - // ensure we can read the whole thing with no error, before writing back out - buf, err := ioutil.ReadAll(fh) - if err != nil { - t.Fatal(err) - } - - bRdr := bytes.NewReader(buf) - err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed}) - if err != nil { - t.Fatal(err) - } - - if i1, err = getInode(path.Join(dest, "1")); err != nil { - t.Fatal(err) - } - if i2, err = getInode(path.Join(dest, "2")); err != nil { - t.Fatal(err) - } - - if i1 != i2 { - t.Errorf("expected matching inodes, but got %d and %d", i1, i2) - } -} - -func getNlink(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) - } - // We need this conversion on ARM64 - return uint64(statT.Nlink), nil -} - -func getInode(path string) (uint64, error) { - stat, err := os.Stat(path) - if err != nil { - return 0, err - } - statT, ok := stat.Sys().(*syscall.Stat_t) - if !ok { - return 0, fmt.Errorf("expected type *syscall.Stat_t, got %t", stat.Sys()) - } - return statT.Ino, nil -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} - -func BenchmarkTarUntar(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, false) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func BenchmarkTarUntarWithLinks(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := path.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, true) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func TestUntarInvalidFilenames(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: 
tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarHardlinkToSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: "symlink1", - Typeflag: tar.TypeSymlink, - Linkname: "regfile", - Mode: 0644, - }, - { - Name: "symlink2", - Typeflag: tar.TypeLink, - Linkname: "symlink1", - Mode: 0644, - }, - { - Name: "regfile", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarInvalidHardlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestUntarInvalidSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try writing to victim/newdir/newfile with a symlink in the path - { - // this header needs to be before the next one, or else there is an error - Name: "dir/loophole", - Typeflag: tar.TypeSymlink, - Linkname: "../../victim", - Mode: 0755, - }, - { - Name: "dir/loophole/newdir/newfile", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestTempArchiveCloseMultipleTimes(t *testing.T) { - reader := ioutil.NopCloser(strings.NewReader("hello")) - tempArchive, err := NewTempArchive(reader, "") - buf := make([]byte, 10) - n, err := tempArchive.Read(buf) - if n != 5 { - t.Fatalf("Expected to read 5 bytes. Read %d instead", n) - } - for i := 0; i < 3; i++ { - if err = tempArchive.Close(); err != nil { - t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go deleted file mode 100644 index 5c754373f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build !windows - -package archive - -import ( - "archive/tar" - "errors" - "os" - "syscall" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. 
- -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return - } - - nlink = uint32(s.Nlink) - inode = uint64(s.Ino) - - // Currently go does not fil in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) - } - - return -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= syscall.S_IFBLK - case tar.TypeChar: - mode |= syscall.S_IFCHR - case tar.TypeFifo: - mode |= syscall.S_IFIFO - } - - if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { - return err - } - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go deleted file mode 100644 index 18f45c480..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix_test.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !windows - -package archive - -import ( - "os" - "testing" -) - -func TestCanonicalTarNameForPath(t *testing.T) { - cases := []struct{ in, expected string }{ - {"foo", "foo"}, - {"foo/bar", "foo/bar"}, - {"foo/dir/", "foo/dir/"}, - } - for _, v := range cases { - if out, err := CanonicalTarNameForPath(v.in); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestCanonicalTarName(t *testing.T) { - cases := []struct { - in string - isDir bool - expected string - }{ - {"foo", false, "foo"}, - {"foo", true, "foo/"}, - {"foo/bar", false, "foo/bar"}, - {"foo/bar", true, "foo/bar/"}, - } - for _, v := range cases { - if out, err := canonicalTarName(v.in, v.isDir); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out) - } - } -} - -func TestChmodTarEntry(t *testing.T) { - cases := []struct { - in, expected os.FileMode - }{ - {0000, 0000}, - {0777, 0777}, - {0644, 0644}, - {0755, 0755}, - {0444, 0444}, - } - for _, v := range cases { - if out := chmodTarEntry(v.in); out != v.expected { - t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go deleted file mode 100644 index 10db4bd00..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build windows - -package archive - -import ( - "archive/tar" - "fmt" - "os" - "strings" -) - -// canonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - // windows: convert windows style relative path with backslashes - // into forward slashes. Since windows does not allow '/' or '\' - // in file names, it is mostly safe to replace however we must - // check just in case - if strings.Contains(p, "/") { - return "", fmt.Errorf("Windows path contains forward slash: %s", p) - } - return strings.Replace(p, string(os.PathSeparator), "/", -1), nil - -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. -func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 - // Add the x bit: make everything +x from windows - perm |= 0111 - - return perm -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (nlink uint32, inode uint64, err error) { - // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go deleted file mode 100644 index 72bc71e06..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build windows - -package archive - -import ( - "os" - "testing" -) - -func TestCanonicalTarNameForPath(t *testing.T) { - cases := []struct { - in, expected string - shouldFail bool - }{ - {"foo", "foo", false}, - {"foo/bar", "___", true}, // unix-styled windows path must fail - {`foo\bar`, "foo/bar", false}, - } - for _, v := range cases { - if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if v.shouldFail && err == nil { - t.Fatalf("canonical path call should have failed with error. 
in=%s out=%s", v.in, out) - } else if !v.shouldFail && out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestCanonicalTarName(t *testing.T) { - cases := []struct { - in string - isDir bool - expected string - }{ - {"foo", false, "foo"}, - {"foo", true, "foo/"}, - {`foo\bar`, false, "foo/bar"}, - {`foo\bar`, true, "foo/bar/"}, - } - for _, v := range cases { - if out, err := canonicalTarName(v.in, v.isDir); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestChmodTarEntry(t *testing.T) { - cases := []struct { - in, expected os.FileMode - }{ - {0000, 0111}, - {0777, 0755}, - {0644, 0755}, - {0755, 0755}, - {0444, 0555}, - } - for _, v := range cases { - if out := chmodTarEntry(v.in); out != v.expected { - t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go deleted file mode 100644 index c7838e859..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go +++ /dev/null @@ -1,383 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -type ChangeType int - -const ( - ChangeModify = iota - ChangeAdd - ChangeDelete -) - -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - var kind string - switch change.Kind { - case ChangeModify: - kind = "C" - case ChangeAdd: - kind = "A" - case ChangeDelete: - kind = "D" - } - return fmt.Sprintf("%s %s", kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar and the go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files, we handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. 
- path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - // Skip AUFS metadata - if matched, err := filepath.Match(string(os.PathSeparator)+".wh..wh.*", path); err != nil || matched { - return err - } - - change := Change{ - Path: path, - } - - // Find out what kind of modification happened - file := filepath.Base(path) - // If there is a whiteout, then the file was removed - if strings.HasPrefix(file, ".wh.") { - originalFile := file[len(".wh."):] - change.Path = filepath.Join(filepath.Dir(path), originalFile) - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. - // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directoriy in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. - if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -type FileInfo struct { - parent *FileInfo - name string - stat *system.Stat_t - children map[string]*FileInfo - capability []byte - added bool -} - -func (root *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := root - if path == string(os.PathSeparator) { - return root - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. 
- return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild, _ := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - bytes.Compare(oldChild.capability, newChild.capability) != 0 { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. - if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } - -} - -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. 
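To round out the picture, a hedged sketch of the layer-diff flow built on ChangesDirs and ExportChanges as documented here; the directory and output paths are placeholders, and deletions are encoded as .wh. whiteout entries in the exported tar stream:

    package main

    import (
    	"io"
    	"log"
    	"os"

    	"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
    )

    func main() {
    	// Diff the upper layer against the lower one; each Change carries a
    	// path plus an Add/Modify/Delete kind and prints as "A|C|D <path>".
    	changes, err := archive.ChangesDirs("/tmp/new-layer", "/tmp/old-layer")
    	if err != nil {
    		log.Fatalf("diff: %v", err)
    	}
    	for _, c := range changes {
    		log.Println(c.String())
    	}

    	// Re-encode the change set as a tar stream rooted at the new layer;
    	// deleted paths become .wh.<name> whiteout headers.
    	rdr, err := archive.ExportChanges("/tmp/new-layer", changes)
    	if err != nil {
    		log.Fatalf("export: %v", err)
    	}
    	out, err := os.Create("/tmp/layer-diff.tar")
    	if err != nil {
    		log.Fatalf("create: %v", err)
    	}
    	defer out.Close()
    	if _, err := io.Copy(out, rdr); err != nil {
    		log.Fatalf("write: %v", err)
    	}
    }
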
-func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) - if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var size int64 - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, _ := os.Lstat(file) - if fileInfo != nil && !fileInfo.IsDir() { - size += fileInfo.Size() - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change) (Archive, error) { - reader, writer := io.Pipe() - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - } - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, ".wh."+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go deleted file mode 100644 index 378cc09c8..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_linux.go +++ /dev/null @@ -1,285 +0,0 @@ -package archive - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this -// method in general returns the entire contents of two directory trees, we -// optimize some FS calls out on linux. 
In particular, we take advantage of the -// fact that getdents(2) returns the inode of each file in the directory being -// walked, which, when walking two trees in parallel to generate a list of -// changes, can be used to prune subtrees without ever having to lstat(2) them -// directly. Eliminating stat calls in this way can save up to seconds on large -// images. -type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// to generating a list of changes between the two directories, as it does not -// reflect the full contents. -func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. -func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. 
Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. - var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of syscall.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go deleted file mode 100644 index 35832f087..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_other.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build !linux - -package archive - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
- if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go deleted file mode 100644 index 9d528e614..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_posix_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "sort" - "testing" -) - -func TestHardLinkOrder(t *testing.T) { - names := []string{"file1.txt", "file2.txt", "file3.txt"} - msg := []byte("Hey y'all") - - // Create dir - src, err := ioutil.TempDir("", "docker-hardlink-test-src-") - if err != nil { - t.Fatal(err) - } - //defer os.RemoveAll(src) - for _, name := range names { - func() { - fh, err := os.Create(path.Join(src, name)) - if err != nil { - t.Fatal(err) - } - defer fh.Close() - if _, err = fh.Write(msg); err != nil { - t.Fatal(err) - } - }() - } - // Create dest, with changes that includes hardlinks - dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") - if err != nil { - t.Fatal(err) - } - os.RemoveAll(dest) // we just want the name, at first - if err := copyDir(src, dest); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - for _, name := range names { - for i := 0; i < 5; i++ { - if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { - t.Fatal(err) - } - } - } - - // get changes - changes, err := ChangesDirs(dest, src) - if err != nil { - t.Fatal(err) - } - - // sort - sort.Sort(changesByPath(changes)) - - // ExportChanges - ar, err := ExportChanges(dest, changes) - if err != nil { - t.Fatal(err) - } - hdrs, err := walkHeaders(ar) - if err != nil { - t.Fatal(err) - } - - // reverse sort - sort.Sort(sort.Reverse(changesByPath(changes))) - // ExportChanges - arRev, err := ExportChanges(dest, changes) - if err != nil { - t.Fatal(err) - } - hdrsRev, err := walkHeaders(arRev) - if err != nil { - t.Fatal(err) - } - - // line up the two sets - sort.Sort(tarHeaders(hdrs)) - sort.Sort(tarHeaders(hdrsRev)) - - // compare Size and LinkName - for i := range hdrs { - if hdrs[i].Name != hdrsRev[i].Name { - t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) - } - if hdrs[i].Size != hdrsRev[i].Size { - t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) - } - if hdrs[i].Typeflag != hdrsRev[i].Typeflag { - t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) - } - if hdrs[i].Linkname != hdrsRev[i].Linkname { - t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, 
hdrs[i].Linkname, hdrsRev[i].Linkname) - } - } - -} - -type tarHeaders []tar.Header - -func (th tarHeaders) Len() int { return len(th) } -func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } -func (th tarHeaders) Less(i, j int) bool { return th[i].Name < th[j].Name } - -func walkHeaders(r io.Reader) ([]tar.Header, error) { - t := tar.NewReader(r) - headers := []tar.Header{} - for { - hdr, err := t.Next() - if err != nil { - if err == io.EOF { - break - } - return headers, err - } - headers = append(headers, *hdr) - } - return headers, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go deleted file mode 100644 index 509bdb2e6..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_test.go +++ /dev/null @@ -1,495 +0,0 @@ -package archive - -import ( - "io/ioutil" - "os" - "os/exec" - "path" - "sort" - "testing" - "time" -) - -func max(x, y int) int { - if x >= y { - return x - } - return y -} - -func copyDir(src, dst string) error { - cmd := exec.Command("cp", "-a", src, dst) - if err := cmd.Run(); err != nil { - return err - } - return nil -} - -type FileType uint32 - -const ( - Regular FileType = iota - Dir - Symlink -) - -type FileData struct { - filetype FileType - path string - contents string - permissions os.FileMode -} - -func createSampleDir(t *testing.T, root string) { - files := []FileData{ - {Regular, "file1", "file1\n", 0600}, - {Regular, "file2", "file2\n", 0666}, - {Regular, "file3", "file3\n", 0404}, - {Regular, "file4", "file4\n", 0600}, - {Regular, "file5", "file5\n", 0600}, - {Regular, "file6", "file6\n", 0600}, - {Regular, "file7", "file7\n", 0600}, - {Dir, "dir1", "", 0740}, - {Regular, "dir1/file1-1", "file1-1\n", 01444}, - {Regular, "dir1/file1-2", "file1-2\n", 0666}, - {Dir, "dir2", "", 0700}, - {Regular, "dir2/file2-1", "file2-1\n", 0666}, - {Regular, "dir2/file2-2", "file2-2\n", 0666}, - {Dir, "dir3", "", 0700}, - {Regular, "dir3/file3-1", "file3-1\n", 0666}, - {Regular, "dir3/file3-2", "file3-2\n", 0666}, - {Dir, "dir4", "", 0700}, - {Regular, "dir4/file3-1", "file4-1\n", 0666}, - {Regular, "dir4/file3-2", "file4-2\n", 0666}, - {Symlink, "symlink1", "target1", 0666}, - {Symlink, "symlink2", "target2", 0666}, - } - - now := time.Now() - for _, info := range files { - p := path.Join(root, info.path) - if info.filetype == Dir { - if err := os.MkdirAll(p, info.permissions); err != nil { - t.Fatal(err) - } - } else if info.filetype == Regular { - if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { - t.Fatal(err) - } - } else if info.filetype == Symlink { - if err := os.Symlink(info.contents, p); err != nil { - t.Fatal(err) - } - } - - if info.filetype != Symlink { - // Set a consistent ctime, atime for all files and dirs - if err := os.Chtimes(p, now, now); err != nil { - t.Fatal(err) - } - } - } -} - -func TestChangeString(t *testing.T) { - modifiyChange := Change{"change", ChangeModify} - toString := modifiyChange.String() - if toString != "C change" { - t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) - } - addChange := Change{"change", ChangeAdd} - toString = addChange.String() - if toString != "A change" { - t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was 
%s", "A change", toString) - } - deleteChange := Change{"change", ChangeDelete} - toString = deleteChange.String() - if toString != "D change" { - t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) - } -} - -func TestChangesWithNoChanges(t *testing.T) { - rwLayer, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rwLayer) - layer, err := ioutil.TempDir("", "docker-changes-test-layer") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(layer) - createSampleDir(t, layer) - changes, err := Changes([]string{layer}, rwLayer) - if err != nil { - t.Fatal(err) - } - if len(changes) != 0 { - t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) - } -} - -func TestChangesWithChanges(t *testing.T) { - // Mock the readonly layer - layer, err := ioutil.TempDir("", "docker-changes-test-layer") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(layer) - createSampleDir(t, layer) - os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) - - // Mock the RW layer - rwLayer, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rwLayer) - - // Create a folder in RW layer - dir1 := path.Join(rwLayer, "dir1") - os.MkdirAll(dir1, 0740) - deletedFile := path.Join(dir1, ".wh.file1-2") - ioutil.WriteFile(deletedFile, []byte{}, 0600) - modifiedFile := path.Join(dir1, "file1-1") - ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) - // Let's add a subfolder for a newFile - subfolder := path.Join(dir1, "subfolder") - os.MkdirAll(subfolder, 0740) - newFile := path.Join(subfolder, "newFile") - ioutil.WriteFile(newFile, []byte{}, 0740) - - changes, err := Changes([]string{layer}, rwLayer) - if err != nil { - t.Fatal(err) - } - - expectedChanges := []Change{ - {"/dir1", ChangeModify}, - {"/dir1/file1-1", ChangeModify}, - {"/dir1/file1-2", ChangeDelete}, - {"/dir1/subfolder", ChangeModify}, - {"/dir1/subfolder/newFile", ChangeAdd}, - } - checkChanges(expectedChanges, changes, t) -} - -// See https://github.com/docker/docker/pull/13590 -func TestChangesWithChangesGH13590(t *testing.T) { - baseLayer, err := ioutil.TempDir("", "docker-changes-test.") - defer os.RemoveAll(baseLayer) - - dir3 := path.Join(baseLayer, "dir1/dir2/dir3") - os.MkdirAll(dir3, 07400) - - file := path.Join(dir3, "file.txt") - ioutil.WriteFile(file, []byte("hello"), 0666) - - layer, err := ioutil.TempDir("", "docker-changes-test2.") - defer os.RemoveAll(layer) - - // Test creating a new file - if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { - t.Fatalf("Cmd failed: %q", err) - } - - os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) - file = path.Join(layer, "dir1/dir2/dir3/file1.txt") - ioutil.WriteFile(file, []byte("bye"), 0666) - - changes, err := Changes([]string{baseLayer}, layer) - if err != nil { - t.Fatal(err) - } - - expectedChanges := []Change{ - {"/dir1/dir2/dir3", ChangeModify}, - {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, - } - checkChanges(expectedChanges, changes, t) - - // Now test changing a file - layer, err = ioutil.TempDir("", "docker-changes-test3.") - defer os.RemoveAll(layer) - - if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { - t.Fatalf("Cmd failed: %q", err) - } - - file = path.Join(layer, "dir1/dir2/dir3/file.txt") - ioutil.WriteFile(file, []byte("bye"), 0666) - - changes, err = Changes([]string{baseLayer}, layer) - if err != nil { - t.Fatal(err) - } - - expectedChanges = 
[]Change{ - {"/dir1/dir2/dir3/file.txt", ChangeModify}, - } - checkChanges(expectedChanges, changes, t) -} - -// Create an directory, copy it, make sure we report no changes between the two -func TestChangesDirsEmpty(t *testing.T) { - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(src) - createSampleDir(t, src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dst) - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - if len(changes) != 0 { - t.Fatalf("Reported changes for identical dirs: %v", changes) - } - os.RemoveAll(src) - os.RemoveAll(dst) -} - -func mutateSampleDir(t *testing.T, root string) { - // Remove a regular file - if err := os.RemoveAll(path.Join(root, "file1")); err != nil { - t.Fatal(err) - } - - // Remove a directory - if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { - t.Fatal(err) - } - - // Remove a symlink - if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { - t.Fatal(err) - } - - // Rewrite a file - if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { - t.Fatal(err) - } - - // Replace a file - if err := os.RemoveAll(path.Join(root, "file3")); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { - t.Fatal(err) - } - - // Touch file - if err := os.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } - - // Replace file with dir - if err := os.RemoveAll(path.Join(root, "file5")); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { - t.Fatal(err) - } - - // Create new file - if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { - t.Fatal(err) - } - - // Create new dir - if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { - t.Fatal(err) - } - - // Create a new symlink - if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { - t.Fatal(err) - } - - // Change a symlink - if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { - t.Fatal(err) - } - if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { - t.Fatal(err) - } - - // Replace dir with file - if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { - t.Fatal(err) - } - - // Touch dir - if err := os.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } -} - -func TestChangesDirsMutated(t *testing.T) { - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - createSampleDir(t, src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(src) - defer os.RemoveAll(dst) - - mutateSampleDir(t, dst) - - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - sort.Sort(changesByPath(changes)) - - expectedChanges := []Change{ - {"/dir1", ChangeDelete}, - {"/dir2", ChangeModify}, - {"/dirnew", ChangeAdd}, - {"/file1", ChangeDelete}, - {"/file2", ChangeModify}, - {"/file3", ChangeModify}, - {"/file4", ChangeModify}, - {"/file5", ChangeModify}, - {"/filenew", ChangeAdd}, - {"/symlink1", ChangeDelete}, - {"/symlink2", 
ChangeModify}, - {"/symlinknew", ChangeAdd}, - } - - for i := 0; i < max(len(changes), len(expectedChanges)); i++ { - if i >= len(expectedChanges) { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } - if i >= len(changes) { - t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) - } - if changes[i].Path == expectedChanges[i].Path { - if changes[i] != expectedChanges[i] { - t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) - } - } else if changes[i].Path < expectedChanges[i].Path { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } else { - t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) - } - } -} - -func TestApplyLayer(t *testing.T) { - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - createSampleDir(t, src) - defer os.RemoveAll(src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - mutateSampleDir(t, dst) - defer os.RemoveAll(dst) - - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - layer, err := ExportChanges(dst, changes) - if err != nil { - t.Fatal(err) - } - - layerCopy, err := NewTempArchive(layer, "") - if err != nil { - t.Fatal(err) - } - - if _, err := ApplyLayer(src, layerCopy); err != nil { - t.Fatal(err) - } - - changes2, err := ChangesDirs(src, dst) - if err != nil { - t.Fatal(err) - } - - if len(changes2) != 0 { - t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) - } -} - -func TestChangesSizeWithNoChanges(t *testing.T) { - size := ChangesSize("/tmp", nil) - if size != 0 { - t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) - } -} - -func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { - changes := []Change{ - {Path: "deletedPath", Kind: ChangeDelete}, - } - size := ChangesSize("/tmp", changes) - if size != 0 { - t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) - } -} - -func TestChangesSize(t *testing.T) { - parentPath, err := ioutil.TempDir("", "docker-changes-test") - defer os.RemoveAll(parentPath) - addition := path.Join(parentPath, "addition") - if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { - t.Fatal(err) - } - modification := path.Join(parentPath, "modification") - if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { - t.Fatal(err) - } - changes := []Change{ - {Path: "addition", Kind: ChangeAdd}, - {Path: "modification", Kind: ChangeModify}, - } - size := ChangesSize(parentPath, changes) - if size != 6 { - t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) - } -} - -func checkChanges(expectedChanges, changes []Change, t *testing.T) { - sort.Sort(changesByPath(expectedChanges)) - sort.Sort(changesByPath(changes)) - for i := 0; i < max(len(changes), len(expectedChanges)); i++ { - if i >= len(expectedChanges) { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } - if i >= len(changes) { - t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) - } - if changes[i].Path == expectedChanges[i].Path { - if changes[i] != expectedChanges[i] { - t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) - } - } else if changes[i].Path < expectedChanges[i].Path { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } else { - t.Fatalf("no 
change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go deleted file mode 100644 index dc1ea608b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_unix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !windows - -package archive - -import ( - "syscall" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldStat.Uid() != newStat.Uid() || - oldStat.Gid() != newStat.Gid() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size for dirs, its not a good measure of change - (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && - (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go deleted file mode 100644 index 6026575e5..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package archive - -import ( - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.Stat_t, newStat *system.Stat_t) bool { - - // Don't look at size for dirs, its not a good measure of change - if oldStat.ModTime() != newStat.ModTime() || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.IsDir() -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go deleted file mode 100644 index 576f336ba..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go +++ /dev/null @@ -1,308 +0,0 @@ -package archive - -import ( - "archive/tar" - "errors" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" -) - -// Errors used or returned by this file. 
-var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. -func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { - if !SpecifiesCurrentDir(cleanedPath) && SpecifiesCurrentDir(originalPath) { - if !HasTrailingPathSeparator(cleanedPath) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) - } - cleanedPath += "." - } - - if !HasTrailingPathSeparator(cleanedPath) && HasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) - } - - return cleanedPath -} - -// AssertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func AssertsDirectory(path string) bool { - return HasTrailingPathSeparator(path) || SpecifiesCurrentDir(path) -} - -// HasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func HasTrailingPathSeparator(path string) bool { - return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) -} - -// SpecifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func SpecifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its -// parent directory and its basename in that directory. -func SplitPathDirEntry(localizedPath string) (dir, base string) { - normalizedPath := filepath.ToSlash(localizedPath) - vol := filepath.VolumeName(normalizedPath) - normalizedPath = normalizedPath[len(vol):] - - if normalizedPath == "/" { - // Specifies the root path. - return filepath.FromSlash(vol + normalizedPath), "." - } - - trimmedPath := vol + strings.TrimRight(normalizedPath, "/") - - dir = filepath.FromSlash(path.Dir(trimmedPath)) - base = filepath.FromSlash(path.Base(trimmedPath)) - - return dir, base -} - -// TarResource archives the resource at the given sourcePath into a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourcePath string) (content Archive, err error) { - if _, err = os.Lstat(sourcePath); err != nil { - // Catches the case where the source does not exist or is not a - // directory if asserted to be a directory, as this also causes an - // error. 
- return - } - - if len(sourcePath) > 1 && HasTrailingPathSeparator(sourcePath) { - // In the case where the source path is a symbolic link AND it ends - // with a path separator, we will want to evaluate the symbolic link. - trimmedPath := sourcePath[:len(sourcePath)-1] - stat, err := os.Lstat(trimmedPath) - if err != nil { - return nil, err - } - - if stat.Mode()&os.ModeSymlink != 0 { - if sourcePath, err = filepath.EvalSymlinks(trimmedPath); err != nil { - return nil, err - } - } - } - - // Separate the source path between it's directory and - // the entry in that directory which we are archiving. - sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - - filter := []string{sourceBase} - - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) - - return TarWithOptions(sourceDir, &TarOptions{ - Compression: Uncompressed, - IncludeFiles: filter, - IncludeSourceDir: true, - }) -} - -// CopyInfo holds basic info about the source -// or destination path of a copy operation. -type CopyInfo struct { - Path string - Exists bool - IsDir bool -} - -// CopyInfoStatPath stats the given path to create a CopyInfo -// struct representing that resource. If mustExist is true, then -// it is an error if there is no file or directory at the given path. -func CopyInfoStatPath(path string, mustExist bool) (CopyInfo, error) { - pathInfo := CopyInfo{Path: path} - - fileInfo, err := os.Lstat(path) - - if err == nil { - pathInfo.Exists, pathInfo.IsDir = true, fileInfo.IsDir() - } else if os.IsNotExist(err) && !mustExist { - err = nil - } - - return pathInfo, err -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent ArchiveReader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. 
- return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case AssertsDirectory(dstInfo.Path): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. - return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. It this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } - -} - -// rebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurance of oldBase with newBase at the beginning of entry names. -func rebaseArchiveEntries(srcContent ArchiveReader, oldBase, newBase string) Archive { - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string) error { - var ( - srcInfo CopyInfo - err error - ) - - // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) - - if srcInfo, err = CopyInfoStatPath(srcPath, true); err != nil { - return err - } - - content, err := TarResource(srcPath) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -func CopyTo(content ArchiveReader, srcInfo CopyInfo, dstPath string) error { - dstInfo, err := CopyInfoStatPath(dstPath, false) - if err != nil { - return err - } - - if !dstInfo.Exists { - // Ensure destination parent dir exists. 
- dstParent, _ := SplitPathDirEntry(dstPath) - - dstStat, err := os.Lstat(dstParent) - if err != nil { - return err - } - if !dstStat.IsDir() { - return ErrNotDirectory - } - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go deleted file mode 100644 index dd0b32362..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_test.go +++ /dev/null @@ -1,637 +0,0 @@ -package archive - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" -) - -func removeAllPaths(paths ...string) { - for _, path := range paths { - os.RemoveAll(path) - } -} - -func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { - var err error - - if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { - t.Fatal(err) - } - - if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { - t.Fatal(err) - } - - return -} - -func isNotDir(err error) bool { - return strings.Contains(err.Error(), "not a directory") -} - -func joinTrailingSep(pathElements ...string) string { - joined := filepath.Join(pathElements...) - - return fmt.Sprintf("%s%c", joined, filepath.Separator) -} - -func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { - t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) - - fileA, err := os.Open(filenameA) - if err != nil { - return - } - defer fileA.Close() - - fileB, err := os.Open(filenameB) - if err != nil { - return - } - defer fileB.Close() - - hasher := sha256.New() - - if _, err = io.Copy(hasher, fileA); err != nil { - return - } - - hashA := hasher.Sum(nil) - hasher.Reset() - - if _, err = io.Copy(hasher, fileB); err != nil { - return - } - - hashB := hasher.Sum(nil) - - if !bytes.Equal(hashA, hashB) { - err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) - } - - return -} - -func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { - t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) - - var changes []Change - - if changes, err = ChangesDirs(newDir, oldDir); err != nil { - return - } - - if len(changes) != 0 { - err = fmt.Errorf("expected no changes between directories, but got: %v", changes) - } - - return -} - -func logDirContents(t *testing.T, dirPath string) { - logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { - if err != nil { - t.Errorf("stat error for path %q: %s", path, err) - return nil - } - - if info.IsDir() { - path = joinTrailingSep(path) - } - - t.Logf("\t%s", path) - - return nil - }) - - t.Logf("logging directory contents: %q", dirPath) - - if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { - t.Fatal(err) - } -} - -func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { - t.Logf("copying from %q to %q", srcPath, dstPath) - - return CopyResource(srcPath, dstPath) -} - -// Basic assumptions about SRC and DST: -// 1. 
SRC must exist. -// 2. If SRC ends with a trailing separator, it must be a directory. -// 3. DST parent directory must exist. -// 4. If DST exists as a file, it must not end with a trailing separator. - -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. -func TestCopyErrSrcNotExists(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - content, err := TarResource(filepath.Join(tmpDirA, "file1")) - if err == nil { - content.Close() - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. -func TestCopyErrSrcNotDir(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - content, err := TarResource(joinTrailingSep(tmpDirA, "file1")) - if err == nil { - content.Close() - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } -} - -// Test for error when SRC is a valid file or directory, -// but the DST parent directory does not exist. -func TestCopyErrDstParentNotExists(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - - // Try with a file source. - content, err := TarResource(srcInfo.Path) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - // Copy to a file whose parent does not exist. - if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } - - // Try with a directory source. - srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - - content, err = TarResource(srcInfo.Path) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - // Copy to a directory whose parent does not exist. - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } -} - -// Test for error when DST ends in a trailing -// path separator but exists as a file. -func TestCopyErrDstNotDir(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - // Try with a file source. 
- srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - - content, err := TarResource(srcInfo.Path) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } - - // Try with a directory source. - srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - - content, err = TarResource(srcInfo.Path) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } -} - -// Possibilities are reduced to the remaining 10 cases: -// -// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action -// =================================================================================================== -// A | no | - | no | - | no | create file -// B | no | - | no | - | yes | error -// C | no | - | yes | no | - | overwrite file -// D | no | - | yes | yes | - | create file in dst dir -// E | yes | no | no | - | - | create dir, copy contents -// F | yes | no | yes | no | - | error -// G | yes | no | yes | yes | - | copy dir and contents -// H | yes | yes | no | - | - | create dir, copy contents -// I | yes | yes | yes | no | - | error -// J | yes | yes | yes | yes | - | copy dir contents -// - -// A. SRC specifies a file and DST (no trailing path separator) doesn't -// exist. This should create a file with the name DST and copy the -// contents of the source file into it. -func TestCopyCaseA(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcPath := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "itWorks.txt") - - var err error - - if err = testCopyHelper(t, srcPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// B. SRC specifies a file and DST (with trailing path separator) doesn't -// exist. This should cause an error because the copy operation cannot -// create a directory when copying a single file. -func TestCopyCaseB(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcPath := filepath.Join(tmpDirA, "file1") - dstDir := joinTrailingSep(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcPath, dstDir); err == nil { - t.Fatal("expected ErrDirNotExists error, but got nil instead") - } - - if err != ErrDirNotExists { - t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) - } -} - -// C. SRC specifies a file and DST exists as a file. This should overwrite -// the file at DST with the contents of the source file. -func TestCopyCaseC(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. 
- createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "file2") - - var err error - - // Ensure they start out different. - if err = fileContentsEqual(t, srcPath, dstPath); err == nil { - t.Fatal("expected different file contents") - } - - if err = testCopyHelper(t, srcPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// D. SRC specifies a file and DST exists as a directory. This should place -// a copy of the source file inside it using the basename from SRC. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseD(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "file1") - dstDir := filepath.Join(tmpDirB, "dir1") - dstPath := filepath.Join(dstDir, "file1") - - var err error - - // Ensure that dstPath doesn't exist. - if _, err = os.Stat(dstPath); !os.IsNotExist(err) { - t.Fatalf("did not expect dstPath %q to exist", dstPath) - } - - if err = testCopyHelper(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir1") - - if err = testCopyHelper(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// E. SRC specifies a directory and DST does not exist. This should create a -// directory at DST and copy the contents of the SRC directory into the DST -// directory. Ensure this works whether DST has a trailing path separator or -// not. -func TestCopyCaseE(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } -} - -// F. SRC specifies a directory and DST exists as a file. This should cause an -// error as it is not possible to overwrite a file with a directory. 
-func TestCopyCaseF(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstFile := filepath.Join(tmpDirB, "file1") - - var err error - - if err = testCopyHelper(t, srcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } -} - -// G. SRC specifies a directory and DST exists as a directory. This should copy -// the SRC directory and all its contents to the DST directory. Ensure this -// works whether DST has a trailing path separator or not. -func TestCopyCaseG(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "dir2") - resultDir := filepath.Join(dstDir, "dir1") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, resultDir, srcDir); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir2") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, resultDir, srcDir); err != nil { - t.Fatal(err) - } -} - -// H. SRC specifies a directory's contents only and DST does not exist. This -// should create a directory at DST and copy the contents of the SRC -// directory (but not the directory itself) into the DST directory. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseH(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } -} - -// I. SRC specifies a directory's contents only and DST exists as a file. This -// should cause an error as it is not possible to overwrite a file with a -// directory. 
-func TestCopyCaseI(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstFile := filepath.Join(tmpDirB, "file1") - - var err error - - if err = testCopyHelper(t, srcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } -} - -// J. SRC specifies a directory's contents only and DST exists as a directory. -// This should copy the contents of the SRC directory (but not the directory -// itself) into the DST directory. Ensure this works whether DST has a -// trailing path separator or not. -func TestCopyCaseJ(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstDir := filepath.Join(tmpDirB, "dir5") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir5") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go deleted file mode 100644 index e305b5e4a..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go deleted file mode 100644 index 2b775b45c..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go deleted file mode 100644 index 10a63a051..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go +++ /dev/null @@ -1,210 
+0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - "syscall" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system" -) - -func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertantly. - if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Note as these operations are platform specific, so must the slash be. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = system.MkdirAll(parentPath, 0600) - if err != nil { - return 0, err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, ".wh..wh.") { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. - if strings.HasPrefix(hdr.Name, ".wh..wh.plnk") && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil { - return 0, err - } - } - continue - } - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. 
- if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - base := filepath.Base(path) - - if strings.HasPrefix(base, ".wh.") { - originalBase := base[len(".wh."):] - originalPath := filepath.Join(filepath.Dir(path), originalBase) - if err := os.RemoveAll(originalPath); err != nil { - return 0, err - } - } else { - // If path exits we almost always just want to remove and replace it. - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). - if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), ".wh..wh.plnk") { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} - if err := syscall.UtimesNano(path, ts); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer ArchiveReader) (int64, error) { - return applyLayerHandler(dest, layer, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. 
-func ApplyUncompressedLayer(dest string, layer ArchiveReader) (int64, error) { - return applyLayerHandler(dest, layer, false) -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer ArchiveReader, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - if decompress { - layer, err = DecompressStream(layer) - if err != nil { - return 0, err - } - } - return UnpackLayer(dest, layer) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go deleted file mode 100644 index 01ed43728..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package archive - -import ( - "archive/tar" - "testing" -) - -func TestApplyLayerInvalidFilenames(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestApplyLayerInvalidHardlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestApplyLayerInvalidSymlink(t *testing.T) { - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go deleted file mode 100644 index a5e08e4ee..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "docker-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - oldDir, err := ioutil.TempDir("", "docker-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go deleted file mode 100644 index 3448569b1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go deleted file mode 100644 index e85aac054..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go deleted file mode 100644 index f5cacea8f..000000000 --- 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/utils_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" -) - -var testUntarFns = map[string]func(string, io.Reader) error{ - "untar": func(dest string, r io.Reader) error { - return Untar(r, dest, nil) - }, - "applylayer": func(dest string, r io.Reader) error { - _, err := ApplyLayer(dest, ArchiveReader(r)) - return err - }, -} - -// testBreakout is a helper function that, within the provided `tmpdir` directory, -// creates a `victim` folder with a generated `hello` file in it. -// `untar` extracts to a directory named `dest`, the tar file created from `headers`. -// -// Here are the tested scenarios: -// - removed `victim` folder (write) -// - removed files from `victim` folder (write) -// - new files in `victim` folder (write) -// - modified files in `victim` folder (write) -// - file in `dest` with same content as `victim/hello` (read) -// -// When using testBreakout make sure you cover one of the scenarios listed above. -func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { - tmpdir, err := ioutil.TempDir("", tmpdir) - if err != nil { - return err - } - defer os.RemoveAll(tmpdir) - - dest := filepath.Join(tmpdir, "dest") - if err := os.Mkdir(dest, 0755); err != nil { - return err - } - - victim := filepath.Join(tmpdir, "victim") - if err := os.Mkdir(victim, 0755); err != nil { - return err - } - hello := filepath.Join(victim, "hello") - helloData, err := time.Now().MarshalText() - if err != nil { - return err - } - if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { - return err - } - helloStat, err := os.Stat(hello) - if err != nil { - return err - } - - reader, writer := io.Pipe() - go func() { - t := tar.NewWriter(writer) - for _, hdr := range headers { - t.WriteHeader(hdr) - } - t.Close() - }() - - untar := testUntarFns[untarFn] - if untar == nil { - return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) - } - if err := untar(dest, reader); err != nil { - if _, ok := err.(breakoutError); !ok { - // If untar returns an error unrelated to an archive breakout, - // then consider this an unexpected error and abort. - return err - } - // Here, untar detected the breakout. - // Let's move on verifying that indeed there was no breakout. - fmt.Printf("breakoutError: %v\n", err) - } - - // Check victim folder - f, err := os.Open(victim) - if err != nil { - // codepath taken if victim folder was removed - return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) - } - defer f.Close() - - // Check contents of victim folder - // - // We are only interested in getting 2 files from the victim folder, because if all is well - // we expect only one result, the `hello` file. If there is a second result, it cannot - // hold the same name `hello` and we assume that a new file got created in the victim folder. - // That is enough to detect an archive breakout. 
- names, err := f.Readdirnames(2) - if err != nil { - // codepath taken if victim is not a folder - return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) - } - for _, name := range names { - if name != "hello" { - // codepath taken if new file was created in victim folder - return fmt.Errorf("archive breakout: new file %q", name) - } - } - - // Check victim/hello - f, err = os.Open(hello) - if err != nil { - // codepath taken if read permissions were removed - return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) - } - defer f.Close() - b, err := ioutil.ReadAll(f) - if err != nil { - return err - } - fi, err := f.Stat() - if err != nil { - return err - } - if helloStat.IsDir() != fi.IsDir() || - // TODO: cannot check for fi.ModTime() change - helloStat.Mode() != fi.Mode() || - helloStat.Size() != fi.Size() || - !bytes.Equal(helloData, b) { - // codepath taken if hello has been modified - return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) - } - - // Check that nothing in dest/ has the same content as victim/hello. - // Since victim/hello was generated with time.Now(), it is safe to assume - // that any file whose content matches exactly victim/hello, managed somehow - // to access victim/hello. - return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - if err != nil { - // skip directory if error - return filepath.SkipDir - } - // enter directory - return nil - } - if err != nil { - // skip file if error - return nil - } - b, err := ioutil.ReadFile(path) - if err != nil { - // Houston, we have a problem. Aborting (space)walk. - return err - } - if bytes.Equal(helloData, b) { - return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) - } - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go deleted file mode 100644 index dfb335c0b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io/ioutil" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (Archive, error) { - files := parseStringPairs(input...) 
- buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return ioutil.NopCloser(buf), nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go deleted file mode 100644 index 46ab36697..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/wrap_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io" - "testing" -) - -func TestGenerateEmptyFile(t *testing.T) { - archive, err := Generate("emptyFile") - if err != nil { - t.Fatal(err) - } - if archive == nil { - t.Fatal("The generated archive should not be nil.") - } - - expectedFiles := [][]string{ - {"emptyFile", ""}, - } - - tr := tar.NewReader(archive) - actualFiles := make([][]string, 0, 10) - i := 0 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - buf := new(bytes.Buffer) - buf.ReadFrom(tr) - content := buf.String() - actualFiles = append(actualFiles, []string{hdr.Name, content}) - i++ - } - if len(actualFiles) != len(expectedFiles) { - t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) - } - for i := 0; i < len(expectedFiles); i++ { - actual := actualFiles[i] - expected := expectedFiles[i] - if actual[0] != expected[0] { - t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) - } - if actual[1] != expected[1] { - t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) - } - } -} - -func TestGenerateWithContent(t *testing.T) { - archive, err := Generate("file", "content") - if err != nil { - t.Fatal(err) - } - if archive == nil { - t.Fatal("The generated archive should not be nil.") - } - - expectedFiles := [][]string{ - {"file", "content"}, - } - - tr := tar.NewReader(archive) - actualFiles := make([][]string, 0, 10) - i := 0 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - buf := new(bytes.Buffer) - buf.ReadFrom(tr) - content := buf.String() - actualFiles = append(actualFiles, []string{hdr.Name, content}) - i++ - } - if len(actualFiles) != len(expectedFiles) { - t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) - } - for i := 0; i < len(expectedFiles); i++ { - actual := actualFiles[i] - expected := expectedFiles[i] - if actual[0] != expected[0] { - t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) - } - if actual[1] != expected[1] { - t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go 
b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go deleted file mode 100644 index 1b8cadc63..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,196 +0,0 @@ -package fileutils - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" -) - -// exclusion return true if the specified pattern is an exclusion -func exclusion(pattern string) bool { - return pattern[0] == '!' -} - -// empty return true if the specified pattern is empty -func empty(pattern string) bool { - return pattern == "" -} - -// CleanPatterns takes a slice of patterns returns a new -// slice of patterns cleaned with filepath.Clean, stripped -// of any empty patterns and lets the caller know whether the -// slice contains any exception patterns (prefixed with !). -func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { - // Loop over exclusion patterns and: - // 1. Clean them up. - // 2. Indicate whether we are dealing with any exception rules. - // 3. Error if we see a single exclusion marker on it's own (!). - cleanedPatterns := []string{} - patternDirs := [][]string{} - exceptions := false - for _, pattern := range patterns { - // Eliminate leading and trailing whitespace. - pattern = strings.TrimSpace(pattern) - if empty(pattern) { - continue - } - if exclusion(pattern) { - if len(pattern) == 1 { - return nil, nil, false, errors.New("Illegal exclusion pattern: !") - } - exceptions = true - } - pattern = filepath.Clean(pattern) - cleanedPatterns = append(cleanedPatterns, pattern) - if exclusion(pattern) { - pattern = pattern[1:] - } - patternDirs = append(patternDirs, strings.Split(pattern, "/")) - } - - return cleanedPatterns, patternDirs, exceptions, nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - patterns, patDirs, _, err := CleanPatterns(patterns) - if err != nil { - return false, err - } - - return OptimizedMatches(file, patterns, patDirs) -} - -// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. -// It will assume that the inputs have been preprocessed and therefore the function -// doen't need to do as much error checking and clean-up. This was done to avoid -// repeating these steps on each file being checked during the archive process. -// The more generic fileutils.Matches() can't make these assumptions. -func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { - matched := false - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, "/") - - for i, pattern := range patterns { - negative := false - - if exclusion(pattern) { - negative = true - pattern = pattern[1:] - } - - match, err := filepath.Match(pattern, file) - if err != nil { - return false, err - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. 
- if len(patDirs[i]) <= len(parentPathDirs) { - match, _ = filepath.Match(strings.Join(patDirs[i], "/"), - strings.Join(parentPathDirs[:len(patDirs[i])], "/")) - } - } - - if match { - matched = !negative - } - } - - if matched { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return matched, nil -} - -// CopyFile copies from src to dst until either EOF is reached -// on src or an error occurs. It verifies src exists and remove -// the dst if it exists. -func CopyFile(src, dst string) (int64, error) { - cleanSrc := filepath.Clean(src) - cleanDst := filepath.Clean(dst) - if cleanSrc == cleanDst { - return 0, nil - } - sf, err := os.Open(cleanSrc) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(cleanDst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. -func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// CreateIfNotExists creates a file or a directory only if it does not already exist. 
-func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go deleted file mode 100644 index b544ffbf2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils_test.go +++ /dev/null @@ -1,402 +0,0 @@ -package fileutils - -import ( - "io/ioutil" - "os" - "path" - "path/filepath" - "testing" -) - -// CopyFile with invalid src -func TestCopyFileWithInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) - if err == nil { - t.Fatal("Should have fail to copy an invalid src file") - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes") - } - -} - -// CopyFile with invalid dest -func TestCopyFileWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - src := path.Join(tempFolder, "file") - err = ioutil.WriteFile(src, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) - if err == nil { - t.Fatal("Should have fail to copy an invalid src file") - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes") - } - -} - -// CopyFile with same src and dest -func TestCopyFileWithSameSrcAndDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - file := path.Join(tempFolder, "file") - err = ioutil.WriteFile(file, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(file, file) - if err != nil { - t.Fatal(err) - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes as it is the same file.") - } -} - -// CopyFile with same src and dest but path is different and not clean -func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - testFolder := path.Join(tempFolder, "test") - err = os.MkdirAll(testFolder, 0740) - if err != nil { - t.Fatal(err) - } - file := path.Join(testFolder, "file") - sameFile := testFolder + "/../test/file" - err = ioutil.WriteFile(file, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(file, sameFile) - if err != nil { - t.Fatal(err) - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes as it is the same file.") - } -} - -func TestCopyFile(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - src := path.Join(tempFolder, "src") - dest := path.Join(tempFolder, "dest") - ioutil.WriteFile(src, []byte("content"), 0777) - ioutil.WriteFile(dest, 
[]byte("destContent"), 0777) - bytes, err := CopyFile(src, dest) - if err != nil { - t.Fatal(err) - } - if bytes != 7 { - t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) - } - actual, err := ioutil.ReadFile(dest) - if err != nil { - t.Fatal(err) - } - if string(actual) != "content" { - t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") - } -} - -// Reading a symlink to a directory must return the directory -func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) { - var err error - if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { - t.Errorf("failed to create directory: %s", err) - } - - if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { - t.Fatalf("failed to read symlink to directory: %s", err) - } - - if path != "/tmp/testReadSymlinkToExistingDirectory" { - t.Fatalf("symlink returned unexpected directory: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { - t.Errorf("failed to remove temporary directory: %s", err) - } - - if err = os.Remove("/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -// Reading a non-existing symlink must fail -func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { - var path string - var err error - if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { - t.Fatalf("error expected for non-existing symlink") - } - - if path != "" { - t.Fatalf("expected empty path, but '%s' was returned", path) - } -} - -// Reading a symlink to a file must fail -func TestReadSymlinkedDirectoryToFile(t *testing.T) { - var err error - var file *os.File - - if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - file.Close() - - if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { - t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") - } - - if path != "" { - t.Fatalf("path should've been empty: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { - t.Errorf("failed to remove file: %s", err) - } - - if err = os.Remove("/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -func TestWildcardMatches(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*"}) - if match != true { - t.Errorf("failed to get a wildcard match, got %v", match) - } -} - -// A simple pattern match should return true. -func TestPatternMatches(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*.go"}) - if match != true { - t.Errorf("failed to get a match, got %v", match) - } -} - -// An exclusion followed by an inclusion should return true. -func TestExclusionPatternMatchesPatternBefore(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) - if match != true { - t.Errorf("failed to get true match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. 
-func TestPatternMatchesFolderExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A pattern followed by an exclusion should return false. -func TestExclusionPatternMatchesPatternAfter(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) - if match != false { - t.Errorf("failed to get false match on exclusion pattern, got %v", match) - } -} - -// A filename evaluating to . should return false. -func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { - match, _ := Matches(".", []string{"*.go"}) - if match != false { - t.Errorf("failed to get false match on ., got %v", match) - } -} - -// A single ! pattern should return an error. -func TestSingleExclamationError(t *testing.T) { - _, err := Matches("fileutils.go", []string{"!"}) - if err == nil { - t.Errorf("failed to get an error for a single exclamation point, got %v", err) - } -} - -// A string preceded with a ! should return true from Exclusion. -func TestExclusion(t *testing.T) { - exclusion := exclusion("!") - if !exclusion { - t.Errorf("failed to get true for a single !, got %v", exclusion) - } -} - -// Matches with no patterns -func TestMatchesWithNoPatterns(t *testing.T) { - matches, err := Matches("/any/path/there", []string{}) - if err != nil { - t.Fatal(err) - } - if matches { - t.Fatalf("Should not have match anything") - } -} - -// Matches with malformed patterns -func TestMatchesWithMalformedPatterns(t *testing.T) { - matches, err := Matches("/any/path/there", []string{"["}) - if err == nil { - t.Fatal("Should have failed because of a malformed syntax in the pattern") - } - if matches { - t.Fatalf("Should not have match anything") - } -} - -// An empty string should return true from Empty. 
-func TestEmpty(t *testing.T) { - empty := empty("") - if !empty { - t.Errorf("failed to get true for an empty string, got %v", empty) - } -} - -func TestCleanPatterns(t *testing.T) { - cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) - if len(cleaned) != 2 { - t.Errorf("expected 2 element slice, got %v", len(cleaned)) - } -} - -func TestCleanPatternsStripEmptyPatterns(t *testing.T) { - cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) - if len(cleaned) != 2 { - t.Errorf("expected 2 element slice, got %v", len(cleaned)) - } -} - -func TestCleanPatternsExceptionFlag(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsErrorSingleException(t *testing.T) { - _, _, _, err := CleanPatterns([]string{"!"}) - if err == nil { - t.Errorf("expected error on single exclamation point, got %v", err) - } -} - -func TestCleanPatternsFolderSplit(t *testing.T) { - _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) - if dirs[0][0] != "docs" { - t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) - } - if dirs[0][1] != "config" { - t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) - } -} - -func TestCreateIfNotExistsDir(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - - folderToCreate := filepath.Join(tempFolder, "tocreate") - - if err := CreateIfNotExists(folderToCreate, true); err != nil { - t.Fatal(err) - } - fileinfo, err := os.Stat(folderToCreate) - if err != nil { - t.Fatalf("Should have create a folder, got %v", err) - } - - if !fileinfo.IsDir() { - t.Fatalf("Should have been a dir, seems it's not") - } -} - -func TestCreateIfNotExistsFile(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - - fileToCreate := filepath.Join(tempFolder, "file/to/create") - - if err := CreateIfNotExists(fileToCreate, false); err != nil { - t.Fatal(err) - } - fileinfo, err := os.Stat(fileToCreate) - if err != nil { - t.Fatalf("Should have create a file, got %v", err) - } - - if fileinfo.IsDir() { - t.Fatalf("Should have been a file, seems it's not") - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go deleted file mode 100644 index dcae17882..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir.go +++ /dev/null @@ -1,39 +0,0 @@ -package homedir - -import ( - "os" - "runtime" - - "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the 
platform being run on -func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { - if u, err := user.CurrentUser(); err == nil { - return u.Home - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } - return "~" -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go deleted file mode 100644 index 7a95cb2bd..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir/homedir_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir - -import ( - "path/filepath" - "testing" -) - -func TestGet(t *testing.T) { - home := Get() - if home == "" { - t.Fatal("returned home directory is empty") - } - - if !filepath.IsAbs(home) { - t.Fatalf("returned path is not absolute: %s", home) - } -} - -func TestGetShortcutString(t *testing.T) { - shortcut := GetShortcutString() - if shortcut == "" { - t.Fatal("returned shortcut string is empty") - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go deleted file mode 100644 index 801132ff3..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt.go +++ /dev/null @@ -1,14 +0,0 @@ -package ioutils - -import ( - "fmt" - "io" -) - -// FprintfIfNotEmpty prints the string value if it's not empty -func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { - if value != "" { - return fmt.Fprintf(w, format, value) - } - return 0, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go deleted file mode 100644 index 896886329..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/fmt_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package ioutils - -import "testing" - -func TestFprintfIfNotEmpty(t *testing.T) { - wc := NewWriteCounter(&NopWriter{}) - n, _ := FprintfIfNotEmpty(wc, "foo%s", "") - - if wc.Count != 0 || n != 0 { - t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) - } - - n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") - if wc.Count != 6 || n != 6 { - t.Errorf("Wrong count: %v vs. %v vs. 
6", wc.Count, n) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go deleted file mode 100644 index f231aa9da..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go +++ /dev/null @@ -1,226 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "os" -) - -type pos struct { - idx int - offset int64 -} - -type multiReadSeeker struct { - readers []io.ReadSeeker - pos *pos - posIdx map[io.ReadSeeker]int -} - -func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { - var tmpOffset int64 - switch whence { - case os.SEEK_SET: - for i, rdr := range r.readers { - // get size of the current reader - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - if offset > tmpOffset+s { - if i == len(r.readers)-1 { - rdrOffset := s + (offset - tmpOffset) - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - r.pos = &pos{i, rdrOffset} - return offset, nil - } - - tmpOffset += s - continue - } - - rdrOffset := offset - tmpOffset - idx := i - - rdr.Seek(rdrOffset, os.SEEK_SET) - // make sure all following readers are at 0 - for _, rdr := range r.readers[i+1:] { - rdr.Seek(0, os.SEEK_SET) - } - - if rdrOffset == s && i != len(r.readers)-1 { - idx += 1 - rdrOffset = 0 - } - r.pos = &pos{idx, rdrOffset} - return offset, nil - } - case os.SEEK_END: - for _, rdr := range r.readers { - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - tmpOffset += s - } - r.Seek(tmpOffset+offset, os.SEEK_SET) - return tmpOffset + offset, nil - case os.SEEK_CUR: - if r.pos == nil { - return r.Seek(offset, os.SEEK_SET) - } - // Just return the current offset - if offset == 0 { - return r.getCurOffset() - } - - curOffset, err := r.getCurOffset() - if err != nil { - return -1, err - } - rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) - if err != nil { - return -1, err - } - - r.pos = &pos{r.posIdx[rdr], rdrOffset} - return curOffset + offset, nil - default: - return -1, fmt.Errorf("Invalid whence: %d", whence) - } - - return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) -} - -func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { - var rdr io.ReadSeeker - var rdrOffset int64 - - for i, rdr := range r.readers { - offsetTo, err := r.getOffsetToReader(rdr) - if err != nil { - return nil, -1, err - } - if offsetTo > offset { - rdr = r.readers[i-1] - rdrOffset = offsetTo - offset - break - } - - if rdr == r.readers[len(r.readers)-1] { - rdrOffset = offsetTo + offset - break - } - } - - return rdr, rdrOffset, nil -} - -func (r *multiReadSeeker) getCurOffset() (int64, error) { - var totalSize int64 - for _, rdr := range r.readers[:r.pos.idx+1] { - if r.posIdx[rdr] == r.pos.idx { - totalSize += r.pos.offset - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, fmt.Errorf("error getting seeker size: %v", err) - } - totalSize += size - } - return totalSize, nil -} - -func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { - var offset int64 - for _, r := range r.readers { - if r == rdr { - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, err - } - offset += size - } - return offset, nil -} - -func (r 
*multiReadSeeker) Read(b []byte) (int, error) { - if r.pos == nil { - r.pos = &pos{0, 0} - } - - bCap := int64(cap(b)) - buf := bytes.NewBuffer(nil) - var rdr io.ReadSeeker - - for _, rdr = range r.readers[r.pos.idx:] { - readBytes, err := io.CopyN(buf, rdr, bCap) - if err != nil && err != io.EOF { - return -1, err - } - bCap -= readBytes - - if bCap == 0 { - break - } - } - - rdrPos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - r.pos = &pos{r.posIdx[rdr], rdrPos} - return buf.Read(b) -} - -func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { - // save the current position - pos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - - // get the size - size, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - // reset the position - if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { - return -1, err - } - return size, nil -} - -// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided -// input readseekers. After calling this method the initial position is set to the -// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances -// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. -// Seek can be used over the sum of lengths of all readseekers. -// -// When a MultiReadSeeker is used, no Read and Seek operations should be made on -// its ReadSeeker components. Also, users should make no assumption on the state -// of individual readseekers while the MultiReadSeeker is used. -func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { - if len(readers) == 1 { - return readers[0] - } - idx := make(map[io.ReadSeeker]int) - for i, rdr := range readers { - idx[rdr] = i - } - return &multiReadSeeker{ - readers: readers, - posIdx: idx, - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go deleted file mode 100644 index de495b56d..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "testing" -) - -func TestMultiReadSeekerReadAll(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) - - b, err := ioutil.ReadAll(mr) - if err != nil { - t.Fatal(err) - } - - expected := "hello world 1hello world 2hello world 3" - if string(b) != expected { - t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) - } - - size, err := mr.Seek(0, os.SEEK_END) - if err != nil { - t.Fatal(err) - } - if size != expectedSize { - t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) - } - - // Reset the position and read again - pos, err := mr.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatal(err) - } - if pos != 0 { - t.Fatalf("expected position to be set to 0, got %d", pos) - } - - b, err = ioutil.ReadAll(mr) - if err != nil { - t.Fatal(err) - } - - if string(b) != expected { - t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) - } -} - -func TestMultiReadSeekerReadEach(t *testing.T) { - 
str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - var totalBytes int64 - for i, s := range []*strings.Reader{s1, s2, s3} { - sLen := int64(s.Len()) - buf := make([]byte, s.Len()) - expected := []byte(fmt.Sprintf("%s %d", str, i+1)) - - if _, err := mr.Read(buf); err != nil && err != io.EOF { - t.Fatal(err) - } - - if !bytes.Equal(buf, expected) { - t.Fatalf("expected %q to be %q", string(buf), string(expected)) - } - - pos, err := mr.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("iteration: %d, error: %v", i+1, err) - } - - // check that the total bytes read is the current position of the seeker - totalBytes += sLen - if pos != totalBytes { - t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) - } - - // This tests not only that SEEK_SET and SEEK_CUR give the same values, but that the next iteration is in the expected position as well - newPos, err := mr.Seek(pos, os.SEEK_SET) - if err != nil { - t.Fatal(err) - } - if newPos != pos { - t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) - } - } -} - -func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - buf := make([]byte, s1.Len()+3) - _, err := mr.Read(buf) - if err != nil { - t.Fatal(err) - } - - // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string - expected := "hello world 1hel" - if string(buf) != expected { - t.Fatalf("expected %s to be %s", string(buf), expected) - } -} - -func TestMultiReadSeekerNegativeSeek(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - s1Len := s1.Len() - s2Len := s2.Len() - s3Len := s3.Len() - - s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) - if err != nil { - t.Fatal(err) - } - if s != int64(s1Len+s2Len) { - t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) - } - - buf := make([]byte, s3Len) - if _, err := mr.Read(buf); err != nil && err != io.EOF { - t.Fatal(err) - } - expected := fmt.Sprintf("%s %d", str, 3) - if string(buf) != fmt.Sprintf("%s %d", str, 3) { - t.Fatalf("expected %q to be %q", string(buf), expected) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go deleted file mode 100644 index ff09baad1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go +++ /dev/null @@ -1,254 +0,0 @@ -package ioutils - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "io" - "math/big" - "sync" - "time" -) - -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return r.closer() -} - -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, 
err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// bufReader allows the underlying reader to continue to produce -// output by pre-emptively reading from the wrapped reader. -// This is achieved by buffering this data in bufReader's -// expanding buffer. -type bufReader struct { - sync.Mutex - buf *bytes.Buffer - reader io.Reader - err error - wait sync.Cond - drainBuf []byte - reuseBuf []byte - maxReuse int64 - resetTimeout time.Duration - bufLenResetThreshold int64 - maxReadDataReset int64 -} - -func NewBufReader(r io.Reader) *bufReader { - var timeout int - if randVal, err := rand.Int(rand.Reader, big.NewInt(120)); err == nil { - timeout = int(randVal.Int64()) + 180 - } else { - timeout = 300 - } - reader := &bufReader{ - buf: &bytes.Buffer{}, - drainBuf: make([]byte, 1024), - reuseBuf: make([]byte, 4096), - maxReuse: 1000, - resetTimeout: time.Second * time.Duration(timeout), - bufLenResetThreshold: 100 * 1024, - maxReadDataReset: 10 * 1024 * 1024, - reader: r, - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer *bytes.Buffer) *bufReader { - reader := &bufReader{ - buf: buffer, - drainBuf: drainBuffer, - reader: r, - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -func (r *bufReader) drain() { - var ( - duration time.Duration - lastReset time.Time - now time.Time - reset bool - bufLen int64 - dataSinceReset int64 - maxBufLen int64 - reuseBufLen int64 - reuseCount int64 - ) - reuseBufLen = int64(len(r.reuseBuf)) - lastReset = time.Now() - for { - n, err := r.reader.Read(r.drainBuf) - dataSinceReset += int64(n) - r.Lock() - bufLen = int64(r.buf.Len()) - if bufLen > maxBufLen { - maxBufLen = bufLen - } - - // Avoid unbounded growth of the buffer over time. - // This has been discovered to be the only non-intrusive - // solution to the unbounded growth of the buffer. - // Alternative solutions such as compression, multiple - // buffers, channels and other similar pieces of code - // were reducing throughput, overall Docker performance - // or simply crashed Docker. - // This solution releases the buffer when specific - // conditions are met to avoid the continuous resizing - // of the buffer for long lived containers. - // - // Move data to the front of the buffer if it's - // smaller than what reuseBuf can store - if bufLen > 0 && reuseBufLen >= bufLen { - n, _ := r.buf.Read(r.reuseBuf) - r.buf.Write(r.reuseBuf[0:n]) - // Take action if the buffer has been reused too many - // times and if there's data in the buffer. - // The timeout is also used as means to avoid doing - // these operations more often or less often than - // required. - // The various conditions try to detect heavy activity - // in the buffer which might be indicators of heavy - // growth of the buffer. - } else if reuseCount >= r.maxReuse && bufLen > 0 { - now = time.Now() - duration = now.Sub(lastReset) - timeoutReached := duration >= r.resetTimeout - - // The timeout has been reached and the - // buffered data couldn't be moved to the front - // of the buffer, so the buffer gets reset. - if timeoutReached && bufLen > reuseBufLen { - reset = true - } - // The amount of buffered data is too high now, - // reset the buffer. 
- if timeoutReached && maxBufLen >= r.bufLenResetThreshold { - reset = true - } - // Reset the buffer if a certain amount of - // data has gone through the buffer since the - // last reset. - if timeoutReached && dataSinceReset >= r.maxReadDataReset { - reset = true - } - // The buffered data is moved to a fresh buffer, - // swap the old buffer with the new one and - // reset all counters. - if reset { - newbuf := &bytes.Buffer{} - newbuf.ReadFrom(r.buf) - r.buf = newbuf - lastReset = now - reset = false - dataSinceReset = 0 - maxBufLen = 0 - reuseCount = 0 - } - } - if err != nil { - r.err = err - } else { - r.buf.Write(r.drainBuf[0:n]) - } - reuseCount++ - r.wait.Signal() - r.Unlock() - callSchedulerIfNecessary() - if err != nil { - break - } - } -} - -func (r *bufReader) Read(p []byte) (n int, err error) { - r.Lock() - defer r.Unlock() - for { - n, err = r.buf.Read(p) - if n > 0 { - return n, err - } - if r.err != nil { - return 0, r.err - } - r.wait.Wait() - } -} - -func (r *bufReader) Close() error { - closer, ok := r.reader.(io.ReadCloser) - if !ok { - return nil - } - return closer.Close() -} - -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go deleted file mode 100644 index 0a39b6ec6..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" -) - -// Implement io.Reader -type errorReader struct{} - -func (r *errorReader) Read(p []byte) (int, error) { - return 0, fmt.Errorf("Error reader always fail.") -} - -func TestReadCloserWrapperClose(t *testing.T) { - reader := strings.NewReader("A string reader") - wrapper := NewReadCloserWrapper(reader, func() error { - return fmt.Errorf("This will be called when closing") - }) - err := wrapper.Close() - if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { - t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") - } -} - -func TestReaderErrWrapperReadOnError(t *testing.T) { - called := false - reader := &errorReader{} - wrapper := NewReaderErrWrapper(reader, func() { - called = true - }) - _, err := wrapper.Read([]byte{}) - if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { - t.Fatalf("readErrWrapper should returned an error") - } - if !called { - t.Fatalf("readErrWrapper should have call the anonymous function on failure") - } -} - -func TestReaderErrWrapperRead(t *testing.T) { - reader := strings.NewReader("a string reader.") - wrapper := NewReaderErrWrapper(reader, func() { - t.Fatalf("readErrWrapper should not have called the anonymous function") - }) - // Read 20 byte (should be ok with the 
string above) - num, err := wrapper.Read(make([]byte, 20)) - if err != nil { - t.Fatal(err) - } - if num != 16 { - t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) - } -} - -func TestNewBufReaderWithDrainbufAndBuffer(t *testing.T) { - reader, writer := io.Pipe() - - drainBuffer := make([]byte, 1024) - buffer := bytes.Buffer{} - bufreader := NewBufReaderWithDrainbufAndBuffer(reader, drainBuffer, &buffer) - - // Write everything down to a Pipe - // Usually, a pipe should block but because of the buffered reader, - // the writes will go through - done := make(chan bool) - go func() { - writer.Write([]byte("hello world")) - writer.Close() - done <- true - }() - - // Drain the reader *after* everything has been written, just to verify - // it is indeed buffering - <-done - - output, err := ioutil.ReadAll(bufreader) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(output, []byte("hello world")) { - t.Error(string(output)) - } -} - -func TestBufReader(t *testing.T) { - reader, writer := io.Pipe() - bufreader := NewBufReader(reader) - - // Write everything down to a Pipe - // Usually, a pipe should block but because of the buffered reader, - // the writes will go through - done := make(chan bool) - go func() { - writer.Write([]byte("hello world")) - writer.Close() - done <- true - }() - - // Drain the reader *after* everything has been written, just to verify - // it is indeed buffering - <-done - output, err := ioutil.ReadAll(bufreader) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(output, []byte("hello world")) { - t.Error(string(output)) - } -} - -func TestBufReaderCloseWithNonReaderCloser(t *testing.T) { - reader := strings.NewReader("buffer") - bufreader := NewBufReader(reader) - - if err := bufreader.Close(); err != nil { - t.Fatal(err) - } - -} - -// implements io.ReadCloser -type simpleReaderCloser struct{} - -func (r *simpleReaderCloser) Read(p []byte) (n int, err error) { - return 0, nil -} - -func (r *simpleReaderCloser) Close() error { - return nil -} - -func TestBufReaderCloseWithReaderCloser(t *testing.T) { - reader := &simpleReaderCloser{} - bufreader := NewBufReader(reader) - - err := bufreader.Close() - if err != nil { - t.Fatal(err) - } - -} - -func TestHashData(t *testing.T) { - reader := strings.NewReader("hash-me") - actual, err := HashData(reader) - if err != nil { - t.Fatal(err) - } - expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" - if actual != expected { - t.Fatalf("Expecting %s, got %s", expected, actual) - } -} - -type repeatedReader struct { - readCount int - maxReads int - data []byte -} - -func newRepeatedReader(max int, data []byte) *repeatedReader { - return &repeatedReader{0, max, data} -} - -func (r *repeatedReader) Read(p []byte) (int, error) { - if r.readCount >= r.maxReads { - return 0, io.EOF - } - r.readCount++ - n := copy(p, r.data) - return n, nil -} - -func testWithData(data []byte, reads int) { - reader := newRepeatedReader(reads, data) - bufReader := NewBufReader(reader) - io.Copy(ioutil.Discard, bufReader) -} - -func Benchmark1M10BytesReads(b *testing.B) { - reads := 1000000 - readSize := int64(10) - data := make([]byte, readSize) - b.SetBytes(readSize * int64(reads)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - testWithData(data, reads) - } -} - -func Benchmark1M1024BytesReads(b *testing.B) { - reads := 1000000 - readSize := int64(1024) - data := make([]byte, readSize) - b.SetBytes(readSize * int64(reads)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - testWithData(data, 
reads) - } -} - -func Benchmark10k32KBytesReads(b *testing.B) { - reads := 10000 - readSize := int64(32 * 1024) - data := make([]byte, readSize) - b.SetBytes(readSize * int64(reads)) - b.ResetTimer() - for i := 0; i < b.N; i++ { - testWithData(data, reads) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go deleted file mode 100644 index 3c88f29e3..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !gccgo - -package ioutils - -func callSchedulerIfNecessary() { -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go deleted file mode 100644 index c11d02b94..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/scheduler_gccgo.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build gccgo - -package ioutils - -import ( - "runtime" -) - -func callSchedulerIfNecessary() { - //allow or force Go scheduler to switch context, without explicitly - //forcing this will make it hang when using gccgo implementation - runtime.Gosched() -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go deleted file mode 100644 index 25095474d..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,47 +0,0 @@ -package ioutils - -import ( - "io" - "net/http" - "sync" -) - -type WriteFlusher struct { - sync.Mutex - w io.Writer - flusher http.Flusher - flushed bool -} - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - wf.Lock() - defer wf.Unlock() - n, err = wf.w.Write(b) - wf.flushed = true - wf.flusher.Flush() - return n, err -} - -// Flush the stream immediately. 
-func (wf *WriteFlusher) Flush() { - wf.Lock() - defer wf.Unlock() - wf.flushed = true - wf.flusher.Flush() -} - -func (wf *WriteFlusher) Flushed() bool { - wf.Lock() - defer wf.Unlock() - return wf.flushed -} - -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var flusher http.Flusher - if f, ok := w.(http.Flusher); ok { - flusher = f - } else { - flusher = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: flusher} -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go deleted file mode 100644 index 43fdc44ea..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go +++ /dev/null @@ -1,60 +0,0 @@ -package ioutils - -import "io" - -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -type NopFlusher struct{} - -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// Wrap a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go deleted file mode 100644 index 564b1cd4f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package ioutils - -import ( - "bytes" - "strings" - "testing" -) - -func TestWriteCloserWrapperClose(t *testing.T) { - called := false - writer := bytes.NewBuffer([]byte{}) - wrapper := NewWriteCloserWrapper(writer, func() error { - called = true - return nil - }) - if err := wrapper.Close(); err != nil { - t.Fatal(err) - } - if !called { - t.Fatalf("writeCloserWrapper should have call the anonymous function.") - } -} - -func TestNopWriteCloser(t *testing.T) { - writer := bytes.NewBuffer([]byte{}) - wrapper := NopWriteCloser(writer) - if err := wrapper.Close(); err != nil { - t.Fatal("NopWriteCloser always return nil on Close.") - } - -} - -func TestNopWriter(t *testing.T) { - nw := &NopWriter{} - l, err := nw.Write([]byte{'c'}) - if err != nil { - t.Fatal(err) - } - if l != 1 { - t.Fatalf("Expected 1 got %d", l) - } -} - -func TestWriteCounter(t *testing.T) { - dummy1 := "This is a dummy string." - dummy2 := "This is another dummy string." 
- totalLength := int64(len(dummy1) + len(dummy2)) - - reader1 := strings.NewReader(dummy1) - reader2 := strings.NewReader(dummy2) - - var buffer bytes.Buffer - wc := NewWriteCounter(&buffer) - - reader1.WriteTo(wc) - reader2.WriteTo(wc) - - if wc.Count != totalLength { - t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) - } - - if buffer.String() != dummy1+dummy2 { - t.Error("Wrong message written") - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE deleted file mode 100644 index ac74d8f04..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2015 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md deleted file mode 100644 index da00efa33..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/README.md +++ /dev/null @@ -1,40 +0,0 @@ -Package mflag (aka multiple-flag) implements command-line flag parsing. 
-It's an **hacky** fork of the [official golang package](http://golang.org/pkg/flag/) - -It adds: - -* both short and long flag version -`./example -s red` `./example --string blue` - -* multiple names for the same option -``` -$>./example -h -Usage of example: - -s, --string="": a simple string -``` - -___ -It is very flexible on purpose, so you can do things like: -``` -$>./example -h -Usage of example: - -s, -string, --string="": a simple string -``` - -Or: -``` -$>./example -h -Usage of example: - -oldflag, --newflag="": a simple string -``` - -You can also hide some flags from the usage, so if we want only `--newflag`: -``` -$>./example -h -Usage of example: - --newflag="": a simple string -$>./example -oldflag str -str -``` - -See [example.go](example/example.go) for more details. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go deleted file mode 100644 index ebfa35010..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag.go +++ /dev/null @@ -1,1201 +0,0 @@ -// Copyright 2014-2015 The Docker & Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mflag implements command-line flag parsing. -// -// Usage: -// -// Define flags using flag.String(), Bool(), Int(), etc. -// -// This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. -// import "flag /github.com/docker/docker/pkg/mflag" -// var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") -// If you like, you can bind the flag to a variable using the Var() functions. -// var flagvar int -// func init() { -// // -flaghidden will work, but will be hidden from the usage -// flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") -// } -// Or you can create custom flags that satisfy the Value interface (with -// pointer receivers) and couple them to flag parsing by -// flag.Var(&flagVal, []string{"name"}, "help message for flagname") -// For such flags, the default value is just the initial value of the variable. -// -// You can also add "deprecated" flags, they are still usable, but are not shown -// in the usage and will display a warning when you try to use them. `#` before -// an option means this option is deprecated, if there is an following option -// without `#` ahead, then that's the replacement, if not, it will just be removed: -// var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname") -// this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or -// this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` -// var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") -// will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.` -// so you can only use `-f`. 
-// -// You can also group one letter flags, bif you declare -// var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") -// var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") -// you will be able to use the -vs or -sv -// -// After all flags are defined, call -// flag.Parse() -// to parse the command line into the defined flags. -// -// Flags may then be used directly. If you're using the flags themselves, -// they are all pointers; if you bind to variables, they're values. -// fmt.Println("ip has value ", *ip) -// fmt.Println("flagvar has value ", flagvar) -// -// After parsing, the arguments after the flag are available as the -// slice flag.Args() or individually as flag.Arg(i). -// The arguments are indexed from 0 through flag.NArg()-1. -// -// Command line flag syntax: -// -flag -// -flag=x -// -flag="x" -// -flag='x' -// -flag x // non-boolean flags only -// One or two minus signs may be used; they are equivalent. -// The last form is not permitted for boolean flags because the -// meaning of the command -// cmd -x * -// will change if there is a file called 0, false, etc. You must -// use the -flag=false form to turn off a boolean flag. -// -// Flag parsing stops just before the first non-flag argument -// ("-" is a non-flag argument) or after the terminator "--". -// -// Integer flags accept 1234, 0664, 0x1234 and may be negative. -// Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. -// Duration flags accept any input valid for time.ParseDuration. -// -// The default set of command-line flags is controlled by -// top-level functions. The FlagSet type allows one to define -// independent sets of flags, such as to implement subcommands -// in a command-line interface. The methods of FlagSet are -// analogous to the top-level functions for the command-line -// flag set. - -package mflag - -import ( - "errors" - "fmt" - "io" - "os" - "runtime" - "sort" - "strconv" - "strings" - "text/tabwriter" - "time" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. 
-var ErrHelp = errors.New("flag: help requested") - -// ErrRetry is the error returned if you need to try letter by letter -var ErrRetry = errors.New("flag: retry") - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Get() interface{} { return bool(*b) } - -func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Get() interface{} { return int(*i) } - -func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Get() interface{} { return int64(*i) } - -func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Get() interface{} { return uint(*i) } - -func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Get() interface{} { return uint64(*i) } - -func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} - -func (s *stringValue) Get() interface{} { return string(*s) } - -func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Get() interface{} { return float64(*f) } - -func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Get() interface{} { return time.Duration(*d) } - -func (d *durationValue) String() 
string { return (*time.Duration)(d).String() } - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -// -// If a Value has an IsBoolFlag() bool method returning true, -// the command-line parser makes -name equivalent to -name=true -// rather than using the next command-line argument. -type Value interface { - String() string - Set(string) error -} - -// Getter is an interface that allows the contents of a Value to be retrieved. -// It wraps the Value interface, rather than being part of it, because it -// appeared after Go 1 and its compatibility rules. All Value types provided -// by this package satisfy the Getter interface. -type Getter interface { - Value - Get() interface{} -} - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -// ErrorHandling strategies available when a flag parsing error occurs -const ( - ContinueOnError ErrorHandling = iota - ExitOnError - PanicOnError -) - -// A FlagSet represents a set of defined flags. The zero value of a FlagSet -// has no name and has ContinueOnError error handling. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - ShortUsage func() - - name string - parsed bool - actual map[string]*Flag - formal map[string]*Flag - args []string // arguments after flags - errorHandling ErrorHandling - output io.Writer // nil means stderr; use Out() accessor - nArgRequirements []nArgRequirement -} - -// A Flag represents the state of a flag. -type Flag struct { - Names []string // name as it appears on command line - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message -} - -type flagSlice []string - -func (p flagSlice) Len() int { return len(p) } -func (p flagSlice) Less(i, j int) bool { - pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") - lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) - if lpi != lpj { - return lpi < lpj - } - return pi < pj -} -func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// sortFlags returns the flags as a slice in lexicographical sorted order. -func sortFlags(flags map[string]*Flag) []*Flag { - var list flagSlice - - // The sorted list is based on the first name, when flag map might use the other names. - nameMap := make(map[string]string) - - for n, f := range flags { - fName := strings.TrimPrefix(f.Names[0], "#") - nameMap[fName] = n - if len(f.Names) == 1 { - list = append(list, fName) - continue - } - - found := false - for _, name := range list { - if name == fName { - found = true - break - } - } - if !found { - list = append(list, fName) - } - } - sort.Sort(list) - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[nameMap[name]] - } - return result -} - -// Name returns the name of the FlagSet. -func (fs *FlagSet) Name() string { - return fs.name -} - -// Out returns the destination for usage and error messages. -func (fs *FlagSet) Out() io.Writer { - if fs.output == nil { - return os.Stderr - } - return fs.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (fs *FlagSet) SetOutput(output io.Writer) { - fs.output = output -} - -// VisitAll visits the flags in lexicographical order, calling fn for each. 
-// It visits all flags, even those not set. -func (fs *FlagSet) VisitAll(fn func(*Flag)) { - for _, flag := range sortFlags(fs.formal) { - fn(flag) - } -} - -// VisitAll visits the command-line flags in lexicographical order, calling -// fn for each. It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order, calling fn for each. -// It visits only those flags that have been set. -func (fs *FlagSet) Visit(fn func(*Flag)) { - for _, flag := range sortFlags(fs.actual) { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order, calling fn -// for each. It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (fs *FlagSet) Lookup(name string) *Flag { - return fs.formal[name] -} - -// IsSet indicates whether the specified flag is set in the given FlagSet -func (fs *FlagSet) IsSet(name string) bool { - return fs.actual[name] != nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.formal[name] -} - -// IsSet indicates whether the specified flag was specified at all on the cmd line. -func IsSet(name string) bool { - return CommandLine.IsSet(name) -} - -type nArgRequirementType int - -// Indicator used to pass to BadArgs function -const ( - Exact nArgRequirementType = iota - Max - Min -) - -type nArgRequirement struct { - Type nArgRequirementType - N int -} - -// Require adds a requirement about the number of arguments for the FlagSet. -// The first parameter can be Exact, Max, or Min to respectively specify the exact, -// the maximum, or the minimal number of arguments required. -// The actual check is done in FlagSet.CheckArgs(). -func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { - fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) -} - -// CheckArgs uses the requirements set by FlagSet.Require() to validate -// the number of arguments. If the requirements are not met, -// an error message string is returned. -func (fs *FlagSet) CheckArgs() (message string) { - for _, req := range fs.nArgRequirements { - var arguments string - if req.N == 1 { - arguments = "1 argument" - } else { - arguments = fmt.Sprintf("%d arguments", req.N) - } - - str := func(kind string) string { - return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments) - } - - switch req.Type { - case Exact: - if fs.NArg() != req.N { - return str("") - } - case Max: - if fs.NArg() > req.N { - return str("a maximum of ") - } - case Min: - if fs.NArg() < req.N { - return str("a minimum of ") - } - } - } - return "" -} - -// Set sets the value of the named flag. -func (fs *FlagSet) Set(name, value string) error { - flag, ok := fs.formal[name] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - if err := flag.Value.Set(value); err != nil { - return err - } - if fs.actual == nil { - fs.actual = make(map[string]*Flag) - } - fs.actual[name] = flag - return nil -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. 
-func (fs *FlagSet) PrintDefaults() { - writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0) - home := homedir.Get() - - // Don't substitute when HOME is / - if runtime.GOOS != "windows" && home == "/" { - home = "" - } - - // Add a blank line between cmd description and list of options - if fs.FlagCount() > 0 { - fmt.Fprintln(writer, "") - } - - fs.VisitAll(func(flag *Flag) { - format := " -%s=%s" - names := []string{} - for _, name := range flag.Names { - if name[0] != '#' { - names = append(names, name) - } - } - if len(names) > 0 && len(flag.Usage) > 0 { - val := flag.DefValue - - if home != "" && strings.HasPrefix(val, home) { - val = homedir.GetShortcutString() + val[len(home):] - } - - fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) - for i, line := range strings.Split(flag.Usage, "\n") { - if i != 0 { - line = " " + line - } - fmt.Fprintln(writer, "\t", line) - } - } - }) - writer.Flush() -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(fs *FlagSet) { - if fs.name == "" { - fmt.Fprintf(fs.Out(), "Usage:\n") - } else { - fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name) - } - fs.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -var Usage = func() { - fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// Usage prints to standard error a usage message documenting the standard command layout -// The function is a variable that may be changed to point to a custom function. -var ShortUsage = func() { - fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) -} - -// FlagCount returns the number of flags that have been defined. -func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) } - -// FlagCountUndeprecated returns the number of undeprecated flags that have been defined. -func (fs *FlagSet) FlagCountUndeprecated() int { - count := 0 - for _, flag := range sortFlags(fs.formal) { - for _, name := range flag.Names { - if name[0] != '#' { - count++ - break - } - } - } - return count -} - -// NFlag returns the number of flags that have been set. -func (fs *FlagSet) NFlag() int { return len(fs.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (fs *FlagSet) Arg(i int) string { - if i < 0 || i >= len(fs.args) { - return "" - } - return fs.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (fs *FlagSet) NArg() int { return len(fs.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. 
-func (fs *FlagSet) Args() []string { return fs.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { - fs.Var(newBoolValue(value, p), names, usage) -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, names []string, value bool, usage string) { - CommandLine.Var(newBoolValue(value, p), names, usage) -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool { - p := new(bool) - fs.BoolVar(p, names, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(names []string, value bool, usage string) *bool { - return CommandLine.Bool(names, value, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) { - fs.Var(newIntValue(value, p), names, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, names []string, value int, usage string) { - CommandLine.Var(newIntValue(value, p), names, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func (fs *FlagSet) Int(names []string, value int, usage string) *int { - p := new(int) - fs.IntVar(p, names, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(names []string, value int, usage string) *int { - return CommandLine.Int(names, value, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { - fs.Var(newInt64Value(value, p), names, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, names []string, value int64, usage string) { - CommandLine.Var(newInt64Value(value, p), names, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. 
-func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 { - p := new(int64) - fs.Int64Var(p, names, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(names []string, value int64, usage string) *int64 { - return CommandLine.Int64(names, value, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { - fs.Var(newUintValue(value, p), names, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, names []string, value uint, usage string) { - CommandLine.Var(newUintValue(value, p), names, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint { - p := new(uint) - fs.UintVar(p, names, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(names []string, value uint, usage string) *uint { - return CommandLine.Uint(names, value, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { - fs.Var(newUint64Value(value, p), names, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func Uint64Var(p *uint64, names []string, value uint64, usage string) { - CommandLine.Var(newUint64Value(value, p), names, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { - p := new(uint64) - fs.Uint64Var(p, names, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(names []string, value uint64, usage string) *uint64 { - return CommandLine.Uint64(names, value, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) { - fs.Var(newStringValue(value, p), names, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. 
-func StringVar(p *string, names []string, value string, usage string) { - CommandLine.Var(newStringValue(value, p), names, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (fs *FlagSet) String(names []string, value string, usage string) *string { - p := new(string) - fs.StringVar(p, names, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(names []string, value string, usage string) *string { - return CommandLine.String(names, value, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { - fs.Var(newFloat64Value(value, p), names, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, names []string, value float64, usage string) { - CommandLine.Var(newFloat64Value(value, p), names, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 { - p := new(float64) - fs.Float64Var(p, names, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(names []string, value float64, usage string) *float64 { - return CommandLine.Float64(names, value, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - fs.Var(newDurationValue(value, p), names, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - CommandLine.Var(newDurationValue(value, p), names, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - fs.DurationVar(p, names, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(names []string, value time.Duration, usage string) *time.Duration { - return CommandLine.Duration(names, value, usage) -} - -// Var defines a flag with the specified name and usage string. 
The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (fs *FlagSet) Var(value Value, names []string, usage string) { - // Remember the default value as a string; it won't change. - flag := &Flag{names, usage, value, value.String()} - for _, name := range names { - name = strings.TrimPrefix(name, "#") - _, alreadythere := fs.formal[name] - if alreadythere { - var msg string - if fs.name == "" { - msg = fmt.Sprintf("flag redefined: %s", name) - } else { - msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name) - } - fmt.Fprintln(fs.Out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if fs.formal == nil { - fs.formal = make(map[string]*Flag) - } - fs.formal[name] = flag - } -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func Var(value Value, names []string, usage string) { - CommandLine.Var(value, names, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (fs *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - fmt.Fprintln(fs.Out(), err) - if os.Args[0] == fs.name { - fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0]) - } else { - fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name) - } - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (fs *FlagSet) usage() { - if fs == CommandLine { - Usage() - } else if fs.Usage == nil { - defaultUsage(fs) - } else { - fs.Usage() - } -} - -func trimQuotes(str string) string { - if len(str) == 0 { - return str - } - type quote struct { - start, end byte - } - - // All valid quote types. - quotes := []quote{ - // Double quotes - { - start: '"', - end: '"', - }, - - // Single quotes - { - start: '\'', - end: '\'', - }, - } - - for _, quote := range quotes { - // Only strip if outermost match. - if str[0] == quote.start && str[len(str)-1] == quote.end { - str = str[1 : len(str)-1] - break - } - } - - return str -} - -// parseOne parses one flag. It reports whether a flag was seen. -func (fs *FlagSet) parseOne() (bool, string, error) { - if len(fs.args) == 0 { - return false, "", nil - } - s := fs.args[0] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - return false, "", nil - } - if s[1] == '-' && len(s) == 2 { // "--" terminates the flags - fs.args = fs.args[1:] - return false, "", nil - } - name := s[1:] - if len(name) == 0 || name[0] == '=' { - return false, "", fs.failf("bad flag syntax: %s", s) - } - - // it's a flag. does it have an argument? 
- fs.args = fs.args[1:] - hasValue := false - value := "" - if i := strings.Index(name, "="); i != -1 { - value = trimQuotes(name[i+1:]) - hasValue = true - name = name[:i] - } - - m := fs.formal - flag, alreadythere := m[name] // BUG - if !alreadythere { - if name == "-help" || name == "help" || name == "h" { // special case for nice help message. - fs.usage() - return false, "", ErrHelp - } - if len(name) > 0 && name[0] == '-' { - return false, "", fs.failf("flag provided but not defined: -%s", name) - } - return false, name, ErrRetry - } - if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg - if hasValue { - if err := fv.Set(value); err != nil { - return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err) - } - } else { - fv.Set("true") - } - } else { - // It must have a value, which might be the next argument. - if !hasValue && len(fs.args) > 0 { - // value is the next arg - hasValue = true - value, fs.args = fs.args[0], fs.args[1:] - } - if !hasValue { - return false, "", fs.failf("flag needs an argument: -%s", name) - } - if err := flag.Value.Set(value); err != nil { - return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err) - } - } - if fs.actual == nil { - fs.actual = make(map[string]*Flag) - } - fs.actual[name] = flag - for i, n := range flag.Names { - if n == fmt.Sprintf("#%s", name) { - replacement := "" - for j := i; j < len(flag.Names); j++ { - if flag.Names[j][0] != '#' { - replacement = flag.Names[j] - break - } - } - if replacement != "" { - fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) - } else { - fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) - } - } - } - return true, "", nil -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. -func (fs *FlagSet) Parse(arguments []string) error { - fs.parsed = true - fs.args = arguments - for { - seen, name, err := fs.parseOne() - if seen { - continue - } - if err == nil { - break - } - if err == ErrRetry { - if len(name) > 1 { - err = nil - for _, letter := range strings.Split(name, "") { - fs.args = append([]string{"-" + letter}, fs.args...) - seen2, _, err2 := fs.parseOne() - if seen2 { - continue - } - if err2 != nil { - err = fs.failf("flag provided but not defined: -%s", name) - break - } - } - if err == nil { - continue - } - } else { - err = fs.failf("flag provided but not defined: -%s", name) - } - } - switch fs.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - return nil -} - -// ParseFlags is a utility function that adds a help flag if withHelp is true, -// calls fs.Parse(args) and prints a relevant error message if there are -// incorrect number of arguments. It returns error only if error handling is -// set to ContinueOnError and parsing fails. If error handling is set to -// ExitOnError, it's safe to ignore the return value. 
-func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error { - var help *bool - if withHelp { - help = fs.Bool([]string{"#help", "-help"}, false, "Print usage") - } - if err := fs.Parse(args); err != nil { - return err - } - if help != nil && *help { - fs.SetOutput(os.Stdout) - fs.Usage() - os.Exit(0) - } - if str := fs.CheckArgs(); str != "" { - fs.SetOutput(os.Stderr) - fs.ReportError(str, withHelp) - fs.ShortUsage() - os.Exit(1) - } - return nil -} - -// ReportError is a utility method that prints a user-friendly message -// containing the error that occured during parsing and a suggestion to get help -func (fs *FlagSet) ReportError(str string, withHelp bool) { - if withHelp { - if os.Args[0] == fs.Name() { - str += ".\nSee '" + os.Args[0] + " --help'" - } else { - str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'" - } - } - fmt.Fprintf(fs.Out(), "docker: %s.\n", str) -} - -// Parsed reports whether fs.Parse has been called. -func (fs *FlagSet) Parsed() bool { - return fs.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -// The top-level functions such as BoolVar, Arg, and on are wrappers for the -// methods of CommandLine. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name and -// error handling property. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - } - return f -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. -func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) { - fs.name = name - fs.errorHandling = errorHandling -} - -type mergeVal struct { - Value - key string - fset *FlagSet -} - -func (v mergeVal) Set(s string) error { - return v.fset.Set(v.key, s) -} - -func (v mergeVal) IsBoolFlag() bool { - if b, ok := v.Value.(boolFlag); ok { - return b.IsBoolFlag() - } - return false -} - -// Merge is an helper function that merges n FlagSets into a single dest FlagSet -// In case of name collision between the flagsets it will apply -// the destination FlagSet's errorHandling behaviour. -func Merge(dest *FlagSet, flagsets ...*FlagSet) error { - for _, fset := range flagsets { - for k, f := range fset.formal { - if _, ok := dest.formal[k]; ok { - var err error - if fset.name == "" { - err = fmt.Errorf("flag redefined: %s", k) - } else { - err = fmt.Errorf("%s flag redefined: %s", fset.name, k) - } - fmt.Fprintln(fset.Out(), err.Error()) - // Happens only if flags are declared with identical names - switch dest.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - newF := *f - newF.Value = mergeVal{f.Value, k, fset} - dest.formal[k] = &newF - } - } - return nil -} - -// IsEmpty reports if the FlagSet is actually empty. 
-func (fs *FlagSet) IsEmpty() bool { - return len(fs.actual) == 0 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag_test.go deleted file mode 100644 index 85f32c8aa..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/mflag/flag_test.go +++ /dev/null @@ -1,516 +0,0 @@ -// Copyright 2014-2015 The Docker & Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mflag - -import ( - "bytes" - "fmt" - "os" - "sort" - "strings" - "testing" - "time" -) - -// ResetForTesting clears all flag state and sets the usage function as directed. -// After calling ResetForTesting, parse errors in flag handling will not -// exit the program. -func ResetForTesting(usage func()) { - CommandLine = NewFlagSet(os.Args[0], ContinueOnError) - Usage = usage -} -func boolString(s string) string { - if s == "0" { - return "false" - } - return "true" -} - -func TestEverything(t *testing.T) { - ResetForTesting(nil) - Bool([]string{"test_bool"}, false, "bool value") - Int([]string{"test_int"}, 0, "int value") - Int64([]string{"test_int64"}, 0, "int64 value") - Uint([]string{"test_uint"}, 0, "uint value") - Uint64([]string{"test_uint64"}, 0, "uint64 value") - String([]string{"test_string"}, "0", "string value") - Float64([]string{"test_float64"}, 0, "float64 value") - Duration([]string{"test_duration"}, 0, "time.Duration value") - - m := make(map[string]*Flag) - desired := "0" - visitor := func(f *Flag) { - for _, name := range f.Names { - if len(name) > 5 && name[0:5] == "test_" { - m[name] = f - ok := false - switch { - case f.Value.String() == desired: - ok = true - case name == "test_bool" && f.Value.String() == boolString(desired): - ok = true - case name == "test_duration" && f.Value.String() == desired+"s": - ok = true - } - if !ok { - t.Error("Visit: bad value", f.Value.String(), "for", name) - } - } - } - } - VisitAll(visitor) - if len(m) != 8 { - t.Error("VisitAll misses some flags") - for k, v := range m { - t.Log(k, *v) - } - } - m = make(map[string]*Flag) - Visit(visitor) - if len(m) != 0 { - t.Errorf("Visit sees unset flags") - for k, v := range m { - t.Log(k, *v) - } - } - // Now set all flags - Set("test_bool", "true") - Set("test_int", "1") - Set("test_int64", "1") - Set("test_uint", "1") - Set("test_uint64", "1") - Set("test_string", "1") - Set("test_float64", "1") - Set("test_duration", "1s") - desired = "1" - Visit(visitor) - if len(m) != 8 { - t.Error("Visit fails after set") - for k, v := range m { - t.Log(k, *v) - } - } - // Now test they're visited in sort order. 
- var flagNames []string - Visit(func(f *Flag) { - for _, name := range f.Names { - flagNames = append(flagNames, name) - } - }) - if !sort.StringsAreSorted(flagNames) { - t.Errorf("flag names not sorted: %v", flagNames) - } -} - -func TestGet(t *testing.T) { - ResetForTesting(nil) - Bool([]string{"test_bool"}, true, "bool value") - Int([]string{"test_int"}, 1, "int value") - Int64([]string{"test_int64"}, 2, "int64 value") - Uint([]string{"test_uint"}, 3, "uint value") - Uint64([]string{"test_uint64"}, 4, "uint64 value") - String([]string{"test_string"}, "5", "string value") - Float64([]string{"test_float64"}, 6, "float64 value") - Duration([]string{"test_duration"}, 7, "time.Duration value") - - visitor := func(f *Flag) { - for _, name := range f.Names { - if len(name) > 5 && name[0:5] == "test_" { - g, ok := f.Value.(Getter) - if !ok { - t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) - return - } - switch name { - case "test_bool": - ok = g.Get() == true - case "test_int": - ok = g.Get() == int(1) - case "test_int64": - ok = g.Get() == int64(2) - case "test_uint": - ok = g.Get() == uint(3) - case "test_uint64": - ok = g.Get() == uint64(4) - case "test_string": - ok = g.Get() == "5" - case "test_float64": - ok = g.Get() == float64(6) - case "test_duration": - ok = g.Get() == time.Duration(7) - } - if !ok { - t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) - } - } - } - } - VisitAll(visitor) -} - -func testParse(f *FlagSet, t *testing.T) { - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - boolFlag := f.Bool([]string{"bool"}, false, "bool value") - bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") - f.Bool([]string{"bool3"}, false, "bool3 value") - bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value") - intFlag := f.Int([]string{"-int"}, 0, "int value") - int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") - uintFlag := f.Uint([]string{"uint"}, 0, "uint value") - uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") - stringFlag := f.String([]string{"string"}, "0", "string value") - f.String([]string{"string2"}, "0", "string2 value") - singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") - doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") - mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") - mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") - nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") - nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") - float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") - durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") - extra := "one-extra-argument" - args := []string{ - "-bool", - "-bool2=true", - "-bool4=false", - "--int", "22", - "--int64", "0x23", - "-uint", "24", - "--uint64", "25", - "-string", "hello", - "-squote='single'", - `-dquote="double"`, - `-mquote='mixed"`, - `-mquote2="mixed2'`, - `-nquote="'single nested'"`, - `-nquote2='"double nested"'`, - "-float64", "2718e28", - "-duration", "2m", - extra, - } - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolFlag != true { - t.Error("bool flag should be true, is ", *boolFlag) - } - if *bool2Flag != true { - t.Error("bool2 flag should be true, is ", *bool2Flag) - } - if !f.IsSet("bool2") { - t.Error("bool2 should be marked as set") - } - if 
f.IsSet("bool3") { - t.Error("bool3 should not be marked as set") - } - if !f.IsSet("bool4") { - t.Error("bool4 should be marked as set") - } - if *bool4Flag != false { - t.Error("bool4 flag should be false, is ", *bool4Flag) - } - if *intFlag != 22 { - t.Error("int flag should be 22, is ", *intFlag) - } - if *int64Flag != 0x23 { - t.Error("int64 flag should be 0x23, is ", *int64Flag) - } - if *uintFlag != 24 { - t.Error("uint flag should be 24, is ", *uintFlag) - } - if *uint64Flag != 25 { - t.Error("uint64 flag should be 25, is ", *uint64Flag) - } - if *stringFlag != "hello" { - t.Error("string flag should be `hello`, is ", *stringFlag) - } - if !f.IsSet("string") { - t.Error("string flag should be marked as set") - } - if f.IsSet("string2") { - t.Error("string2 flag should not be marked as set") - } - if *singleQuoteFlag != "single" { - t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) - } - if *doubleQuoteFlag != "double" { - t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) - } - if *mixedQuoteFlag != `'mixed"` { - t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) - } - if *mixed2QuoteFlag != `"mixed2'` { - t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) - } - if *nestedQuoteFlag != "'single nested'" { - t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) - } - if *nested2QuoteFlag != `"double nested"` { - t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) - } - if *float64Flag != 2718e28 { - t.Error("float64 flag should be 2718e28, is ", *float64Flag) - } - if *durationFlag != 2*time.Minute { - t.Error("duration flag should be 2m, is ", *durationFlag) - } - if len(f.Args()) != 1 { - t.Error("expected one argument, got", len(f.Args())) - } else if f.Args()[0] != extra { - t.Errorf("expected argument %q got %q", extra, f.Args()[0]) - } -} - -func testPanic(f *FlagSet, t *testing.T) { - f.Int([]string{"-int"}, 0, "int value") - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - args := []string{ - "-int", "21", - } - f.Parse(args) -} - -func TestParsePanic(t *testing.T) { - ResetForTesting(func() {}) - testPanic(CommandLine, t) -} - -func TestParse(t *testing.T) { - ResetForTesting(func() { t.Error("bad parse") }) - testParse(CommandLine, t) -} - -func TestFlagSetParse(t *testing.T) { - testParse(NewFlagSet("test", ContinueOnError), t) -} - -// Declare a user-defined flag type. -type flagVar []string - -func (f *flagVar) String() string { - return fmt.Sprint([]string(*f)) -} - -func (f *flagVar) Set(value string) error { - *f = append(*f, value) - return nil -} - -func TestUserDefined(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var v flagVar - flags.Var(&v, []string{"v"}, "usage") - if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { - t.Error(err) - } - if len(v) != 3 { - t.Fatal("expected 3 args; got ", len(v)) - } - expect := "[1 2 3]" - if v.String() != expect { - t.Errorf("expected value %q got %q", expect, v.String()) - } -} - -// Declare a user-defined boolean flag type. 
-type boolFlagVar struct { - count int -} - -func (b *boolFlagVar) String() string { - return fmt.Sprintf("%d", b.count) -} - -func (b *boolFlagVar) Set(value string) error { - if value == "true" { - b.count++ - } - return nil -} - -func (b *boolFlagVar) IsBoolFlag() bool { - return b.count < 4 -} - -func TestUserDefinedBool(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var b boolFlagVar - var err error - flags.Var(&b, []string{"b"}, "usage") - if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { - if b.count < 4 { - t.Error(err) - } - } - - if b.count != 4 { - t.Errorf("want: %d; got: %d", 4, b.count) - } - - if err == nil { - t.Error("expected error; got none") - } -} - -func TestSetOutput(t *testing.T) { - var flags FlagSet - var buf bytes.Buffer - flags.SetOutput(&buf) - flags.Init("test", ContinueOnError) - flags.Parse([]string{"-unknown"}) - if out := buf.String(); !strings.Contains(out, "-unknown") { - t.Logf("expected output mentioning unknown; got %q", out) - } -} - -// This tests that one can reset the flags. This still works but not well, and is -// superseded by FlagSet. -func TestChangingArgs(t *testing.T) { - ResetForTesting(func() { t.Fatal("bad parse") }) - oldArgs := os.Args - defer func() { os.Args = oldArgs }() - os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} - before := Bool([]string{"before"}, false, "") - if err := CommandLine.Parse(os.Args[1:]); err != nil { - t.Fatal(err) - } - cmd := Arg(0) - os.Args = Args() - after := Bool([]string{"after"}, false, "") - Parse() - args := Args() - - if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { - t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) - } -} - -// Test that -help invokes the usage message and returns ErrHelp. -func TestHelp(t *testing.T) { - var helpCalled = false - fs := NewFlagSet("help test", ContinueOnError) - fs.Usage = func() { helpCalled = true } - var flag bool - fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") - // Regular flag invocation should work - err := fs.Parse([]string{"-flag=true"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - if !flag { - t.Error("flag was not set by -flag") - } - if helpCalled { - t.Error("help called for regular flag") - helpCalled = false // reset for next test - } - // Help flag should work as expected. - err = fs.Parse([]string{"-help"}) - if err == nil { - t.Fatal("error expected") - } - if err != ErrHelp { - t.Fatal("expected ErrHelp; got ", err) - } - if !helpCalled { - t.Fatal("help was not called") - } - // If we define a help flag, that should override. - var help bool - fs.BoolVar(&help, []string{"help"}, false, "help flag") - helpCalled = false - err = fs.Parse([]string{"-help"}) - if err != nil { - t.Fatal("expected no error for defined -help; got ", err) - } - if helpCalled { - t.Fatal("help was called; should not have been for defined help flag") - } -} - -// Test the flag count functions. 
-func TestFlagCounts(t *testing.T) { - fs := NewFlagSet("help test", ContinueOnError) - var flag bool - fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag") - fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag") - fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag") - fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag") - fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag") - fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag") - - if fs.FlagCount() != 6 { - t.Fatal("FlagCount wrong. ", fs.FlagCount()) - } - if fs.FlagCountUndeprecated() != 4 { - t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated()) - } - if fs.NFlag() != 0 { - t.Fatal("NFlag wrong. ", fs.NFlag()) - } - err := fs.Parse([]string{"-fd", "-g", "-flag4"}) - if err != nil { - t.Fatal("expected no error for defined -help; got ", err) - } - if fs.NFlag() != 4 { - t.Fatal("NFlag wrong. ", fs.NFlag()) - } -} - -// Show up bug in sortFlags -func TestSortFlags(t *testing.T) { - fs := NewFlagSet("help TestSortFlags", ContinueOnError) - - var err error - - var b bool - fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage") - - err = fs.Parse([]string{"--banana=true"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - count := 0 - - fs.VisitAll(func(flag *Flag) { - count++ - if flag == nil { - t.Fatal("VisitAll should not return a nil flag") - } - }) - flagcount := fs.FlagCount() - if flagcount != count { - t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count) - } - // Make sure its idempotent - if flagcount != fs.FlagCount() { - t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount()) - } - - count = 0 - fs.Visit(func(flag *Flag) { - count++ - if flag == nil { - t.Fatal("Visit should not return a nil flag") - } - }) - nflag := fs.NFlag() - if nflag != count { - t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count) - } - if nflag != fs.NFlag() { - t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag()) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go deleted file mode 100644 index e326a1191..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go +++ /dev/null @@ -1,187 +0,0 @@ -// Package parsers provides helper functions to parse and validate different type -// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel -// operating system versions. -package parsers - -import ( - "fmt" - "net/url" - "path" - "runtime" - "strconv" - "strings" -) - -// ParseHost parses the specified address and returns an address that will be used as the host. 
-// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr -// FIXME: Change this not to receive default value as parameter -func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) { - addr = strings.TrimSpace(addr) - if addr == "" { - if runtime.GOOS != "windows" { - addr = fmt.Sprintf("unix://%s", defaultUnixAddr) - } else { - // Note - defaultTCPAddr already includes tcp:// prefix - addr = defaultTCPAddr - } - } - addrParts := strings.Split(addr, "://") - if len(addrParts) == 1 { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], defaultTCPAddr) - case "unix": - return ParseUnixAddr(addrParts[1], defaultUnixAddr) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// ParseUnixAddr parses and validates that the specified address is a valid UNIX -// socket address. It returns a formatted UNIX socket address, either using the -// address parsed from addr, or the contents of defaultAddr if addr is a blank -// string. -func ParseUnixAddr(addr string, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, "unix://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("unix://%s", addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from addr, or the contents of defaultAddr if addr is a blank string. -func ParseTCPAddr(addr string, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr) - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - hostParts := strings.Split(u.Host, ":") - if len(hostParts) != 2 { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - host := hostParts[0] - if host == "" { - host = defaultAddr - } - - p, err := strconv.Atoi(hostParts[1]) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } - return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil -} - -// ParseRepositoryTag gets a repos name and returns the right reposName + tag|digest -// The tag can be confusing because of a port in a repository name. -// Ex: localhost.localdomain:5000/samalba/hipache:latest -// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb -func ParseRepositoryTag(repos string) (string, string) { - n := strings.Index(repos, "@") - if n >= 0 { - parts := strings.Split(repos, "@") - return parts[0], parts[1] - } - n = strings.LastIndex(repos, ":") - if n < 0 { - return repos, "" - } - if tag := repos[n+1:]; !strings.Contains(tag, "/") { - return repos[:n], tag - } - return repos, "" -} - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) -func ParseKeyValueOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) - } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get an HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go deleted file mode 100644 index 903c66afb..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package parsers - -import ( - "strings" - "testing" -) - -func TestParseHost(t *testing.T) { - var ( - defaultHTTPHost = "127.0.0.1" - defaultUnix = "/var/run/docker.sock" - ) - invalids := map[string]string{ - "0.0.0.0": "Invalid bind address format: 0.0.0.0", - "tcp://": "Invalid proto, expected tcp: ", - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", - } - valids := map[string]string{ - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - ":6666": "tcp://127.0.0.1:6666", - ":6666/path": "tcp://127.0.0.1:6666/path", - "tcp://:7777": "tcp://127.0.0.1:7777", - "tcp://:7777/path": "tcp://127.0.0.1:7777/path", - "": "unix:///var/run/docker.sock", - "unix:///run/docker.sock": "unix:///run/docker.sock", - "unix://": "unix:///var/run/docker.sock", - "fd://": "fd://", - "fd://something": "fd://something", - } - for invalidAddr, expectedError 
:= range invalids { - if addr, err := ParseHost(defaultHTTPHost, defaultUnix, invalidAddr); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := ParseHost(defaultHTTPHost, defaultUnix, validAddr); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got %v", validAddr, expectedAddr, addr) - } - } -} - -func TestParseInvalidUnixAddrInvalid(t *testing.T) { - if _, err := ParseUnixAddr("unix://tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } -} - -func TestParseRepositoryTag(t *testing.T) { - if repo, tag := ParseRepositoryTag("root"); repo != "root" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("root:tag"); repo != "root" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "root", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("root@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "root" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "root", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } - if repo, tag := ParseRepositoryTag("user/repo"); repo != "user/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("user/repo:tag"); repo != "user/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "user/repo", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("user/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "user/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "user/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } - if repo, tag := ParseRepositoryTag("url:5000/repo"); repo != "url:5000/repo" || tag != "" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "", repo, tag) - } - if repo, tag := ParseRepositoryTag("url:5000/repo:tag"); repo != "url:5000/repo" || tag != "tag" { - t.Errorf("Expected repo: '%s' and tag: '%s', got '%s' and '%s'", "url:5000/repo", "tag", repo, tag) - } - if repo, digest := ParseRepositoryTag("url:5000/repo@sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"); repo != "url:5000/repo" || digest != "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" { - t.Errorf("Expected repo: '%s' and digest: '%s', got '%s' and '%s'", "url:5000/repo", "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", repo, digest) - } -} - -func TestParsePortMapping(t *testing.T) { - if _, err := PartParser("ip:public:private", "192.168.1.1:80"); err == nil { - t.Fatalf("Expected an error, got %v", err) - } - data, err := PartParser("ip:public:private", "192.168.1.1:80:8080") - if err != nil { - t.Fatal(err) - } - - if len(data) != 3 { - t.FailNow() - } - if data["ip"] != "192.168.1.1" { - t.Fail() - } 
- if data["public"] != "80" { - t.Fail() - } - if data["private"] != "8080" { - t.Fail() - } -} - -func TestParseKeyValueOpt(t *testing.T) { - invalids := map[string]string{ - "": "Unable to parse key/value option: ", - "key": "Unable to parse key/value option: key", - } - for invalid, expectedError := range invalids { - if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { - t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) - } - } - valids := map[string][]string{ - "key=value": {"key", "value"}, - " key = value ": {"key", "value"}, - "key=value1=value2": {"key", "value1=value2"}, - " key = value1 = value2 ": {"key", "value1 = value2"}, - } - for valid, expectedKeyValue := range valids { - key, value, err := ParseKeyValueOpt(valid) - if err != nil { - t.Fatal(err) - } - if key != expectedKeyValue[0] || value != expectedKeyValue[1] { - t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) - } - } -} - -func TestParsePortRange(t *testing.T) { - if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { - t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) - } -} - -func TestParsePortRangeEmpty(t *testing.T) { - if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." { - t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) - } -} - -func TestParsePortRangeWithNoRange(t *testing.T) { - start, end, err := ParsePortRange("8080") - if err != nil { - t.Fatal(err) - } - if start != 8080 || end != 8080 { - t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) - } -} - -func TestParsePortRangeIncorrectRange(t *testing.T) { - if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParsePortRangeIncorrectEndRange(t *testing.T) { - if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } - - if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParsePortRangeIncorrectStartRange(t *testing.T) { - if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } - - if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { - t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) - } -} - -func TestParseLink(t *testing.T) { - name, alias, err := ParseLink("name:alias") - if err != nil { - t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "alias" { - t.Fatalf("Link alias should have been alias, got %s instead", alias) - } - // short format definition - name, alias, err = ParseLink("name") - if err != nil { - t.Fatalf("Expected not to error out on a valid name only 
format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "name" { - t.Fatalf("Link alias should have been name, got %s instead", alias) - } - // empty string link definition is not allowed - if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { - t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) - } - // more than two colons are not allowed - if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { - t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go deleted file mode 100644 index 515fb4d05..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools.go +++ /dev/null @@ -1,119 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. -// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools - -import ( - "bufio" - "io" - "sync" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils" -) - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool *BufioReaderPool - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool *BufioWriterPool -) - -const buffer32K = 32 * 1024 - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool sync.Pool -} - -func init() { - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - pool := sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - } - return &BufioReaderPool{pool: pool} -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. -func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := BufioReader32KPool.Get(src) - written, err = io.Copy(dst, buf) - BufioReader32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. 
-func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool. -type BufioWriterPool struct { - pool sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - pool := sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - } - return &BufioWriterPool{pool: pool} -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. -func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go deleted file mode 100644 index 78689800b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools/pools_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package pools - -import ( - "bufio" - "bytes" - "io" - "strings" - "testing" -) - -func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { - reader := BufioReader32KPool.Get(nil) - if reader == nil { - t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") - } -} - -func TestBufioReaderPoolPutAndGet(t *testing.T) { - sr := bufio.NewReader(strings.NewReader("foobar")) - reader := BufioReader32KPool.Get(sr) - if reader == nil { - t.Fatalf("BufioReaderPool should not return a nil reader.") - } - // verify the first 3 byte - buf1 := make([]byte, 3) - _, err := reader.Read(buf1) - if err != nil { - t.Fatal(err) - } - if actual := string(buf1); actual != "foo" { - t.Fatalf("The first letter should have been 'foo' but was %v", actual) - } - BufioReader32KPool.Put(reader) - // Try to read the next 3 bytes - _, err = sr.Read(make([]byte, 3)) - if err == nil || err != io.EOF { - t.Fatalf("The buffer should have been empty, issue an EOF error.") - } -} - -type simpleReaderCloser struct { - io.Reader - closed bool -} - -func (r *simpleReaderCloser) Close() error { - r.closed = true - return nil -} - -func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { - br := bufio.NewReader(strings.NewReader("")) - sr := &simpleReaderCloser{ - Reader: strings.NewReader("foobar"), - closed: false, - } - reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) - if reader == nil { - t.Fatalf("NewReadCloserWrapper should not return a nil reader.") - } - // Verify the content of reader - buf := make([]byte, 3) - _, err := reader.Read(buf) - if err != nil 
{ - t.Fatal(err) - } - if actual := string(buf); actual != "foo" { - t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) - } - reader.Close() - // Read 3 more bytes "bar" - _, err = reader.Read(buf) - if err != nil { - t.Fatal(err) - } - if actual := string(buf); actual != "bar" { - t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) - } - if !sr.closed { - t.Fatalf("The ReaderCloser should have been closed, it is not.") - } -} - -func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { - writer := BufioWriter32KPool.Get(nil) - if writer == nil { - t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") - } -} - -func TestBufioWriterPoolPutAndGet(t *testing.T) { - buf := new(bytes.Buffer) - bw := bufio.NewWriter(buf) - writer := BufioWriter32KPool.Get(bw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } - written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } - // Make sure we Flush all the way ? - writer.Flush() - bw.Flush() - if len(buf.Bytes()) != 6 { - t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) - } - // Reset the buffer - buf.Reset() - BufioWriter32KPool.Put(writer) - // Try to write something - written, err = writer.Write([]byte("barfoo")) - if err != nil { - t.Fatal(err) - } - // If we now try to flush it, it should panic (the writer is nil) - // recover it - defer func() { - if r := recover(); r == nil { - t.Fatal("Trying to flush the writter should have 'paniced', did not.") - } - }() - writer.Flush() -} - -type simpleWriterCloser struct { - io.Writer - closed bool -} - -func (r *simpleWriterCloser) Close() error { - r.closed = true - return nil -} - -func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { - buf := new(bytes.Buffer) - bw := bufio.NewWriter(buf) - sw := &simpleWriterCloser{ - Writer: new(bytes.Buffer), - closed: false, - } - bw.Flush() - writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } - written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } - writer.Close() - if !sw.closed { - t.Fatalf("The ReaderCloser should have been closed, it is not.") - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go deleted file mode 100644 index dd52b9082..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise/promise.go +++ /dev/null @@ -1,11 +0,0 @@ -package promise - -// Go is a basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. 
-func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go deleted file mode 100644 index 63b3df79f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ /dev/null @@ -1,168 +0,0 @@ -package stdcopy - -import ( - "encoding/binary" - "errors" - "io" - - "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" -) - -const ( - StdWriterPrefixLen = 8 - StdWriterFdIndex = 0 - StdWriterSizeIndex = 4 -) - -type StdType [StdWriterPrefixLen]byte - -var ( - Stdin StdType = StdType{0: 0} - Stdout StdType = StdType{0: 1} - Stderr StdType = StdType{0: 2} -) - -type StdWriter struct { - io.Writer - prefix StdType - sizeBuf []byte -} - -func (w *StdWriter) Write(buf []byte) (n int, err error) { - var n1, n2 int - if w == nil || w.Writer == nil { - return 0, errors.New("Writer not instantiated") - } - binary.BigEndian.PutUint32(w.prefix[4:], uint32(len(buf))) - n1, err = w.Writer.Write(w.prefix[:]) - if err != nil { - n = n1 - StdWriterPrefixLen - } else { - n2, err = w.Writer.Write(buf) - n = n1 + n2 - StdWriterPrefixLen - } - if n < 0 { - n = 0 - } - return -} - -// NewStdWriter instantiates a new Writer. -// Everything written to it will be encapsulated using a custom format, -// and written to the underlying `w` stream. -// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. -// `t` indicates the id of the stream to encapsulate. -// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. -func NewStdWriter(w io.Writer, t StdType) *StdWriter { - return &StdWriter{ - Writer: w, - prefix: t, - sizeBuf: make([]byte, 4), - } -} - -var ErrInvalidStdHeader = errors.New("Unrecognized input header") - -// StdCopy is a modified version of io.Copy. -// -// StdCopy will demultiplex `src`, assuming that it contains two streams, -// previously multiplexed together using a StdWriter instance. -// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. -// -// StdCopy will read until it hits EOF on `src`. It will then return a nil error. -// In other words: if `err` is non nil, it indicates a real underlying error. -// -// `written` will hold the total number of bytes written to `dstout` and `dsterr`. 
-func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { - var ( - buf = make([]byte, 32*1024+StdWriterPrefixLen+1) - bufLen = len(buf) - nr, nw int - er, ew error - out io.Writer - frameSize int - ) - - for { - // Make sure we have at least a full header - for nr < StdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < StdWriterPrefixLen { - logrus.Debugf("Corrupted prefix: %v", buf[:nr]) - return written, nil - } - break - } - if er != nil { - logrus.Debugf("Error reading header: %s", er) - return 0, er - } - } - - // Check the first byte to know where to write - switch buf[StdWriterFdIndex] { - case 0: - fallthrough - case 1: - // Write on stdout - out = dstout - case 2: - // Write on stderr - out = dsterr - default: - logrus.Debugf("Error selecting output fd: (%d)", buf[StdWriterFdIndex]) - return 0, ErrInvalidStdHeader - } - - // Retrieve the size of the frame - frameSize = int(binary.BigEndian.Uint32(buf[StdWriterSizeIndex : StdWriterSizeIndex+4])) - logrus.Debugf("framesize: %d", frameSize) - - // Check if the buffer is big enough to read the frame. - // Extend it if necessary. - if frameSize+StdWriterPrefixLen > bufLen { - logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+StdWriterPrefixLen-bufLen+1, len(buf)) - buf = append(buf, make([]byte, frameSize+StdWriterPrefixLen-bufLen+1)...) - bufLen = len(buf) - } - - // While the amount of bytes read is less than the size of the frame + header, we keep reading - for nr < frameSize+StdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < frameSize+StdWriterPrefixLen { - logrus.Debugf("Corrupted frame: %v", buf[StdWriterPrefixLen:nr]) - return written, nil - } - break - } - if er != nil { - logrus.Debugf("Error reading frame: %s", er) - return 0, er - } - } - - // Write the retrieved frame (without header) - nw, ew = out.Write(buf[StdWriterPrefixLen : frameSize+StdWriterPrefixLen]) - if ew != nil { - logrus.Debugf("Error writing frame: %s", ew) - return 0, ew - } - // If the frame has not been fully written: error - if nw != frameSize { - logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize) - return 0, io.ErrShortWrite - } - written += int64(nw) - - // Move the rest of the buffer to the beginning - copy(buf, buf[frameSize+StdWriterPrefixLen:]) - // Move the index - nr -= frameSize + StdWriterPrefixLen - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go deleted file mode 100644 index a9fd73a49..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy/stdcopy_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package stdcopy - -import ( - "bytes" - "io/ioutil" - "strings" - "testing" -) - -func TestNewStdWriter(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - if writer == nil { - t.Fatalf("NewStdWriter with an invalid StdType should not return nil.") - } -} - -func TestWriteWithUnitializedStdWriter(t *testing.T) { - writer := StdWriter{ - Writer: nil, - prefix: Stdout, - sizeBuf: make([]byte, 4), - } - n, err := writer.Write([]byte("Something here")) - if n != 0 || err == nil { - t.Fatalf("Should fail when given an uncomplete or uninitialized StdWriter") - } -} - -func TestWriteWithNilBytes(t *testing.T) { - writer := 
NewStdWriter(ioutil.Discard, Stdout) - n, err := writer.Write(nil) - if err != nil { - t.Fatalf("Shouldn't have fail when given no data") - } - if n > 0 { - t.Fatalf("Write should have written 0 byte, but has written %d", n) - } -} - -func TestWrite(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - data := []byte("Test StdWrite.Write") - n, err := writer.Write(data) - if err != nil { - t.Fatalf("Error while writing with StdWrite") - } - if n != len(data) { - t.Fatalf("Write should have writen %d byte but wrote %d.", len(data), n) - } -} - -func TestStdCopyWithInvalidInputHeader(t *testing.T) { - dstOut := NewStdWriter(ioutil.Discard, Stdout) - dstErr := NewStdWriter(ioutil.Discard, Stderr) - src := strings.NewReader("Invalid input") - _, err := StdCopy(dstOut, dstErr, src) - if err == nil { - t.Fatal("StdCopy with invalid input header should fail.") - } -} - -func TestStdCopyWithCorruptedPrefix(t *testing.T) { - data := []byte{0x01, 0x02, 0x03} - src := bytes.NewReader(data) - written, err := StdCopy(nil, nil, src) - if err != nil { - t.Fatalf("StdCopy should not return an error with corrupted prefix.") - } - if written != 0 { - t.Fatalf("StdCopy should have written 0, but has written %d", written) - } -} - -func BenchmarkWrite(b *testing.B) { - w := NewStdWriter(ioutil.Discard, Stdout) - data := []byte("Test line for testing stdwriter performance\n") - data = bytes.Repeat(data, 100) - b.SetBytes(int64(len(data))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := w.Write(data); err != nil { - b.Fatal(err) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go deleted file mode 100644 index 63045186f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/errors.go +++ /dev/null @@ -1,9 +0,0 @@ -package system - -import ( - "errors" -) - -var ( - ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") -) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go deleted file mode 100644 index 23f7c618b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/events_windows.go +++ /dev/null @@ -1,83 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. 
- -import ( - "syscall" - "unsafe" -) - -const ( - EVENT_ALL_ACCESS = 0x1F0003 - EVENT_MODIFY_STATUS = 0x0002 -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 = 0 - if manualReset { - _p1 = 1 - } - var _p2 uint32 = 0 - if initialState { - _p2 = 1 - } - r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) { - namep, _ := syscall.UTF16PtrFromString(name) - var _p1 uint32 = 0 - if inheritHandle { - _p1 = 1 - } - r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - err = e1 - } - return -} - -func SetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procSetEvent) -} - -func ResetEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procResetEvent) -} - -func PulseEvent(handle syscall.Handle) (err error) { - return setResetPulse(handle, procPulseEvent) -} - -func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -var temp unsafe.Pointer - -// use ensures a variable is kept alive without the GC freeing while still needed -func use(p unsafe.Pointer) { - temp = p -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go deleted file mode 100644 index e1f70e8da..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package system - -import ( - "os" -) - -func MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go deleted file mode 100644 index 90b500608..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/filesys_windows.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "regexp" - "syscall" -) - -// MkdirAll implementation that is volume path aware for Windows. -func MkdirAll(path string, perm os.FileMode) error { - if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { - return nil - } - - // The rest of this method is copied from os.MkdirAll and should be kept - // as-is to ensure compatibility. 
- - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{ - Op: "mkdir", - Path: path, - Err: syscall.ENOTDIR, - } - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = MkdirAll(path[0:j-1], perm) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = os.Mkdir(path, perm) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go deleted file mode 100644 index d0e43b370..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.Stat_t type pertaining to that file. -// -// Throws an error if the file does not exist -func Lstat(path string) (*Stat_t, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go deleted file mode 100644 index 6bac492eb..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package system - -import ( - "os" - "testing" -) - -// TestLstat tests Lstat for existing and non existing files -func TestLstat(t *testing.T) { - file, invalid, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - statFile, err := Lstat(file) - if err != nil { - t.Fatal(err) - } - if statFile == nil { - t.Fatal("returned empty stat for existing file") - } - - statInvalid, err := Lstat(invalid) - if err == nil { - t.Fatal("did not return error for non-existing file") - } - if statInvalid != nil { - t.Fatal("returned non-nil stat for non-existing file") - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go deleted file mode 100644 index eee1be26e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/lstat_windows.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build windows - -package system - -import ( - "os" -) - -// Some explanation for my own sanity, and hopefully maintainers in the -// future. -// -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. 
-// Note the Linux version uses fromStatT to do the copy back, -// but that not strictly necessary when already in an OS specific module. - -func Lstat(path string) (*Stat_t, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return &Stat_t{ - name: fi.Name(), - size: fi.Size(), - mode: fi.Mode(), - modTime: fi.ModTime(), - isDir: fi.IsDir()}, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go deleted file mode 100644 index 3b6e947e6..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go deleted file mode 100644 index 41f2bab60..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,71 +0,0 @@ -package system - -import ( - "bufio" - "errors" - "io" - "os" - "strconv" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units" -) - -var ( - ErrMalformed = errors.New("malformed file") -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given a io.Reader to the file. -// -// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. 
- if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go deleted file mode 100644 index 87830ccb2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package system - -import ( - "strings" - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units" -) - -// TestMemInfo tests parseMemInfo with a static meminfo string -func TestMemInfo(t *testing.T) { - const input = ` - MemTotal: 1 kB - MemFree: 2 kB - SwapTotal: 3 kB - SwapFree: 4 kB - Malformed1: - Malformed2: 1 - Malformed3: 2 MB - Malformed4: X kB - ` - meminfo, err := parseMemInfo(strings.NewReader(input)) - if err != nil { - t.Fatal(err) - } - if meminfo.MemTotal != 1*units.KiB { - t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal) - } - if meminfo.MemFree != 2*units.KiB { - t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree) - } - if meminfo.SwapTotal != 3*units.KiB { - t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal) - } - if meminfo.SwapFree != 4*units.KiB { - t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go deleted file mode 100644 index 604d33875..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux,!windows - -package system - -func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go deleted file mode 100644 index d46642598..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go deleted file mode 100644 index 26617eb08..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev -func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) -} - -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go deleted file mode 100644 index 1811542ab..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/mknod_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build windows - -package system - -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go deleted file mode 100644 index e2ecfe52f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Stat_t type contains status of a file. 
It contains metadata -// like permission, owner, group, size, etc about a file -type Stat_t struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -func (s Stat_t) Mode() uint32 { - return s.mode -} - -func (s Stat_t) Uid() uint32 { - return s.uid -} - -func (s Stat_t) Gid() uint32 { - return s.gid -} - -func (s Stat_t) Rdev() uint64 { - return s.rdev -} - -func (s Stat_t) Size() int64 { - return s.size -} - -func (s Stat_t) Mtim() syscall.Timespec { - return s.mtim -} - -func (s Stat_t) GetLastModification() syscall.Timespec { - return s.Mtim() -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go deleted file mode 100644 index 4b2198b3a..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { - return &Stat_t{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} - -// Stat takes a path to a file and returns -// a system.Stat_t type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*Stat_t, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go deleted file mode 100644 index 80262d951..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { - return &Stat_t{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} - -// FromStatT exists only on linux, and loads a system.Stat_t from a -// syscal.Stat_t. -func FromStatT(s *syscall.Stat_t) (*Stat_t, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.Stat_t type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*Stat_t, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go deleted file mode 100644 index 453412920..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_test.go +++ /dev/null @@ -1,37 +0,0 @@ -package system - -import ( - "os" - "syscall" - "testing" -) - -// TestFromStatT tests fromStatT for a tempfile -func TestFromStatT(t *testing.T) { - file, _, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - stat := &syscall.Stat_t{} - err := syscall.Lstat(file, stat) - - s, err := fromStatT(stat) - if err != nil { - t.Fatal(err) - } - - if stat.Mode != s.Mode() { - t.Fatal("got invalid mode") - } - if stat.Uid != s.Uid() { - t.Fatal("got invalid uid") - } - if stat.Gid != s.Gid() { - t.Fatal("got invalid gid") - } - if stat.Rdev != s.Rdev() { - t.Fatal("got invalid rdev") - } - if stat.Mtim != s.Mtim() { - t.Fatal("got invalid mtim") - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go deleted file mode 100644 index 5251ae212..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux,!windows,!freebsd - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.Stat_t type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*Stat_t, error) { - return &Stat_t{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go deleted file mode 100644 index b1fd39e83..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_windows.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "time" -) - -type Stat_t struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -func (s Stat_t) Name() string { - return s.name -} - -func (s Stat_t) Size() int64 { - return s.size -} - -func (s Stat_t) Mode() os.FileMode { - return s.mode -} - -func (s Stat_t) ModTime() time.Time { - return s.modTime -} - -func (s Stat_t) IsDir() bool { - return s.isDir -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go deleted file mode 100644 index fddbecd39..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -func 
Umask(newmask int) (oldmask int, err error) { - return syscall.Umask(newmask), nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go deleted file mode 100644 index 3be563f89..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/umask_windows.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build windows - -package system - -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go deleted file mode 100644 index 4c6002fe8..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_darwin.go +++ /dev/null @@ -1,11 +0,0 @@ -package system - -import "syscall" - -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} - -func UtimesNano(path string, ts []syscall.Timespec) error { - return syscall.UtimesNano(path, ts) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go deleted file mode 100644 index ceaa044c1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,24 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -func LUtimesNano(path string, ts []syscall.Timespec) error { - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} - -func UtimesNano(path string, ts []syscall.Timespec) error { - return syscall.UtimesNano(path, ts) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go deleted file mode 100644 index 8f9029827..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go +++ /dev/null @@ -1,28 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -func LUtimesNano(path string, ts []syscall.Timespec) error { - // These are not currently available in syscall - AT_FDCWD := -100 - AT_SYMLINK_NOFOLLOW := 0x100 - - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(AT_FDCWD), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(AT_SYMLINK_NOFOLLOW), 0, 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} - -func UtimesNano(path string, ts []syscall.Timespec) error { - return syscall.UtimesNano(path, ts) -} diff --git 
a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go deleted file mode 100644 index 350cce1ea..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package system - -import ( - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" -) - -// prepareFiles creates files for testing in the temp directory -func prepareFiles(t *testing.T) (string, string, string, string) { - dir, err := ioutil.TempDir("", "docker-system-test") - if err != nil { - t.Fatal(err) - } - - file := filepath.Join(dir, "exist") - if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil { - t.Fatal(err) - } - - invalid := filepath.Join(dir, "doesnt-exist") - - symlink := filepath.Join(dir, "symlink") - if err := os.Symlink(file, symlink); err != nil { - t.Fatal(err) - } - - return file, invalid, symlink, dir -} - -func TestLUtimesNano(t *testing.T) { - file, invalid, symlink, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - before, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - ts := []syscall.Timespec{{0, 0}, {0, 0}} - if err := LUtimesNano(symlink, ts); err != nil { - t.Fatal(err) - } - - symlinkInfo, err := os.Lstat(symlink) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() { - t.Fatal("The modification time of the symlink should be different") - } - - fileInfo, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - if before.ModTime().Unix() != fileInfo.ModTime().Unix() { - t.Fatal("The modification time of the file should be same") - } - - if err := LUtimesNano(invalid, ts); err == nil { - t.Fatal("Doesn't return an error on a non-existing file") - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go deleted file mode 100644 index adf2734f2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux,!freebsd,!darwin - -package system - -import "syscall" - -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} - -func UtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go deleted file mode 100644 index 00edb201b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,59 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// Returns a nil slice and nil error if the xattr is not set -func Lgetxattr(path string, attr string) ([]byte, error) { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return nil, err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return nil, err - } - - dest := make([]byte, 128) - destBytes := 
unsafe.Pointer(&dest[0]) - sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - if errno == syscall.ENODATA { - return nil, nil - } - if errno == syscall.ERANGE { - dest = make([]byte, sz) - destBytes := unsafe.Pointer(&dest[0]) - sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0) - } - if errno != 0 { - return nil, errno - } - - return dest[:sz], nil -} - -var _zero uintptr - -func Lsetxattr(path string, attr string, data []byte, flags int) error { - pathBytes, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - attrBytes, err := syscall.BytePtrFromString(attr) - if err != nil { - return err - } - var dataBytes unsafe.Pointer - if len(data) > 0 { - dataBytes = unsafe.Pointer(&data[0]) - } else { - dataBytes = unsafe.Pointer(&_zero) - } - _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0) - if errno != 0 { - return errno - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go deleted file mode 100644 index 0060c167d..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux - -package system - -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go deleted file mode 100644 index 8fb0d804d..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go +++ /dev/null @@ -1,111 +0,0 @@ -// Package ulimit provides structure and helper function to parse and represent -// resource limits (Rlimit and Ulimit, its human friendly version). -package ulimit - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. 
- // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// Parse parses and returns a Ulimit from the specified string. -func Parse(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - limitVals := strings.SplitN(parts[1], ":", 2) - if len(limitVals) > 2 { - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - soft, err := strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - - hard := soft // in case no hard was set - if len(limitVals) == 2 { - hard, err = strconv.ParseInt(limitVals[1], 10, 64) - } - if soft > hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard) - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. 
-func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go deleted file mode 100644 index 1e8c881f5..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package ulimit - -import "testing" - -func TestParseValid(t *testing.T) { - u1 := &Ulimit{"nofile", 1024, 512} - if u2, _ := Parse("nofile=512:1024"); *u1 != *u2 { - t.Fatalf("expected %q, but got %q", u1, u2) - } -} - -func TestParseInvalidLimitType(t *testing.T) { - if _, err := Parse("notarealtype=1024:1024"); err == nil { - t.Fatalf("expected error on invalid ulimit type") - } -} - -func TestParseBadFormat(t *testing.T) { - if _, err := Parse("nofile:1024:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := Parse("nofile"); err == nil { - t.Fatal("expected error on bad syntax") - } - - if _, err := Parse("nofile="); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := Parse("nofile=:"); err == nil { - t.Fatal("expected error on bad syntax") - } - if _, err := Parse("nofile=:1024"); err == nil { - t.Fatal("expected error on bad syntax") - } -} - -func TestParseHardLessThanSoft(t *testing.T) { - if _, err := Parse("nofile:1024:1"); err == nil { - t.Fatal("expected error on hard limit less than soft limit") - } -} - -func TestParseInvalidValueType(t *testing.T) { - if _, err := Parse("nofile:asdf"); err == nil { - t.Fatal("expected error on bad value type") - } -} - -func TestStringOutput(t *testing.T) { - u := &Ulimit{"nofile", 1024, 512} - if s := u.String(); s != "nofile=512:1024" { - t.Fatal("expected String to return nofile=512:1024, but got", s) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go deleted file mode 100644 index c219a8a96..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). 
-func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go deleted file mode 100644 index fcfb6b7bb..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package units - -import ( - "testing" - "time" -) - -func TestHumanDuration(t *testing.T) { - // Useful duration abstractions - day := 24 * time.Hour - week := 7 * day - month := 30 * day - year := 365 * day - - assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) - assertEquals(t, "47 seconds", HumanDuration(47*time.Second)) - assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) - assertEquals(t, "3 minutes", HumanDuration(3*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) - assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) - assertEquals(t, "About an hour", HumanDuration(1*time.Hour+45*time.Minute)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) - assertEquals(t, "3 hours", HumanDuration(3*time.Hour+59*time.Minute)) - assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) - assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) - assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) - assertEquals(t, "2 days", HumanDuration(2*day)) - assertEquals(t, "7 days", HumanDuration(7*day)) - assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) - assertEquals(t, "2 weeks", HumanDuration(2*week)) - assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) - assertEquals(t, "3 weeks", HumanDuration(3*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week)) - assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) - assertEquals(t, "4 weeks", HumanDuration(1*month)) - assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) - assertEquals(t, "8 weeks", HumanDuration(2*month)) - assertEquals(t, "3 months", HumanDuration(3*month+1*week)) - assertEquals(t, "5 months", HumanDuration(5*month+2*week)) - assertEquals(t, "13 months", HumanDuration(13*month)) - assertEquals(t, "23 months", HumanDuration(23*month)) - assertEquals(t, "24 months", HumanDuration(24*month)) - assertEquals(t, "2 years", HumanDuration(24*month+2*week)) - assertEquals(t, "3 years", HumanDuration(3*year+2*month)) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go 
b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go deleted file mode 100644 index 2fde3b412..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go +++ /dev/null @@ -1,95 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - i := 0 - for size >= base { - size = size / base - i++ - } - return fmt.Sprintf(format, size, _map[i]) -} - -// HumanSize returns a human-readable approximation of a size -// using SI standard (eg. "44kB", "17MB"). -func HumanSize(size float64) string { - return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). -func BytesSize(size float64) string { - return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. 
-func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 3 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseInt(matches[1], 10, 0) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[2]) - if mul, ok := uMap[unitPrefix]; ok { - size *= mul - } - - return size, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go deleted file mode 100644 index 67c3b81e6..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package units - -import ( - "reflect" - "runtime" - "strings" - "testing" -) - -func TestBytesSize(t *testing.T) { - assertEquals(t, "1 KiB", BytesSize(1024)) - assertEquals(t, "1 MiB", BytesSize(1024*1024)) - assertEquals(t, "1 MiB", BytesSize(1048576)) - assertEquals(t, "2 MiB", BytesSize(2*MiB)) - assertEquals(t, "3.42 GiB", BytesSize(3.42*GiB)) - assertEquals(t, "5.372 TiB", BytesSize(5.372*TiB)) - assertEquals(t, "2.22 PiB", BytesSize(2.22*PiB)) -} - -func TestHumanSize(t *testing.T) { - assertEquals(t, "1 kB", HumanSize(1000)) - assertEquals(t, "1.024 kB", HumanSize(1024)) - assertEquals(t, "1 MB", HumanSize(1000000)) - assertEquals(t, "1.049 MB", HumanSize(1048576)) - assertEquals(t, "2 MB", HumanSize(2*MB)) - assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB))) - assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB))) - assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB))) -} - -func TestFromHumanSize(t *testing.T) { - assertSuccessEquals(t, 32, FromHumanSize, "32") - assertSuccessEquals(t, 32, FromHumanSize, "32b") - assertSuccessEquals(t, 32, FromHumanSize, "32B") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") - assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") - assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") - assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") - assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") - assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") - - assertError(t, FromHumanSize, "") - assertError(t, FromHumanSize, "hello") - assertError(t, FromHumanSize, "-32") - assertError(t, FromHumanSize, "32.3") - assertError(t, FromHumanSize, " 32 ") - assertError(t, FromHumanSize, "32.3Kb") - assertError(t, FromHumanSize, "32 mb") - assertError(t, FromHumanSize, "32m b") - assertError(t, FromHumanSize, "32bm") -} - -func TestRAMInBytes(t *testing.T) { - assertSuccessEquals(t, 32, RAMInBytes, "32") - assertSuccessEquals(t, 32, RAMInBytes, "32b") - assertSuccessEquals(t, 32, RAMInBytes, "32B") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") - assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") - assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") - assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") - assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") - assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") - - assertError(t, RAMInBytes, "") - assertError(t, RAMInBytes, "hello") - 
assertError(t, RAMInBytes, "-32") - assertError(t, RAMInBytes, "32.3") - assertError(t, RAMInBytes, " 32 ") - assertError(t, RAMInBytes, "32.3Kb") - assertError(t, RAMInBytes, "32 mb") - assertError(t, RAMInBytes, "32m b") - assertError(t, RAMInBytes, "32bm") -} - -func assertEquals(t *testing.T, expected, actual interface{}) { - if expected != actual { - t.Errorf("Expected '%v' but got '%v'", expected, actual) - } -} - -// func that maps to the parse function signatures as testing abstraction -type parseFn func(string) (int64, error) - -// Define 'String()' for pretty-print -func (fn parseFn) String() string { - fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name() - return fnName[strings.LastIndex(fnName, ".")+1:] -} - -func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) { - res, err := fn(arg) - if err != nil || res != expected { - t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err) - } -} - -func assertError(t *testing.T, fn parseFn, arg string) { - res, err := fn(arg) - if err == nil && res != -1 { - t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go deleted file mode 100644 index 19c9d77ad..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/volume/volume.go +++ /dev/null @@ -1,61 +0,0 @@ -package volume - -// DefaultDriverName is the driver name used for the driver -// implemented in the local package. -const DefaultDriverName string = "local" - -// Driver is for creating and removing volumes. -type Driver interface { - // Name returns the name of the volume driver. - Name() string - // Create makes a new volume with the given id. - Create(string) (Volume, error) - // Remove deletes the volume. - Remove(Volume) error -} - -// Volume is a place to store data. It is backed by a specific driver, and can be mounted. -type Volume interface { - // Name returns the name of the volume - Name() string - // DriverName returns the name of the driver which owns this volume. - DriverName() string - // Path returns the absolute path to the volume. - Path() string - // Mount mounts the volume and returns the absolute path to - // where it can be consumed. - Mount() (string, error) - // Unmount unmounts the volume when it is no longer in use. - Unmount() error -} - -// read-write modes -var rwModes = map[string]bool{ - "rw": true, - "rw,Z": true, - "rw,z": true, - "z,rw": true, - "Z,rw": true, - "Z": true, - "z": true, -} - -// read-only modes -var roModes = map[string]bool{ - "ro": true, - "ro,Z": true, - "ro,z": true, - "z,ro": true, - "Z,ro": true, -} - -// ValidateMountMode will make sure the mount mode is valid. -// returns if it's a valid mount mode and if it's read-write or not. -func ValidateMountMode(mode string) (bool, bool) { - return roModes[mode] || rwModes[mode], rwModes[mode] -} - -// ReadWrite tells you if a mode string is a valid read-only mode or not. 
-func ReadWrite(mode string) bool { - return rwModes[mode] -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS deleted file mode 100644 index edbe20066..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Aleksa Sarai (@cyphar) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go deleted file mode 100644 index 6f8a982ff..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup.go +++ /dev/null @@ -1,108 +0,0 @@ -package user - -import ( - "errors" - "fmt" - "syscall" -) - -var ( - // The current operating system does not provide the required data for user lookups. - ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") -) - -func lookupUser(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. - users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, fmt.Errorf("no matching entries in passwd file") - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(syscall.Getuid()) -} - -// LookupUser looks up a user by their username in /etc/passwd. If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUser(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupId -// returns an error. -func LookupUid(uid int) (User, error) { - return lookupUser(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupGroup(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the users. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No user entries found. - if len(groups) == 0 { - return Group{}, fmt.Errorf("no matching entries in group file") - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. 
-func CurrentGroup() (Group, error) { - return LookupGid(syscall.Getgid()) -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Gid == gid - }) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go deleted file mode 100644 index 758b734c2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unix.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" -) - -// Unix-specific path to the passwd and group formatted files. -const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 721794887..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import "io" - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go deleted file mode 100644 index 13226dbfa..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user.go +++ /dev/null @@ -1,407 +0,0 @@ -package user - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minId = 0 - maxId = 1<<31 - 1 //for 32-bit systems compatibility -) - -var ( - ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -func parseLine(line string, v ...interface{}) { - if line == "" { - return - } - - 
parts := strings.Split(line, ":") - for i, p := range parts { - if len(v) <= i { - // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files - break - } - - switch e := v[i].(type) { - case *string: - // "root", "adm", "/bin/bash" - *e = p - case *int: - // "0", "4", "1000" - // ignore string to int conversion errors, for great "tolerance" of naughty configuration files - *e, _ = strconv.Atoi(p) - case *[]string: - // "", "root", "root,adm,daemon" - if p != "" { - *e = strings.Split(p, ",") - } else { - *e = []string{} - } - default: - // panic, because this is a programming/logic error, not a runtime one - panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!") - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, fmt.Errorf("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine( - text, - &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, fmt.Errorf("nil source for group-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []Group{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := s.Text() - if text == "" { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine( - text, - &p.Name, &p.Pass, &p.Gid, &p.List, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -type ExecUser struct { - Uid, Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. 
-func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - passwd, err := os.Open(passwdPath) - if err != nil { - passwd = nil - } else { - defer passwd.Close() - } - - group, err := os.Open(groupPath) - if err != nil { - group = nil - } else { - defer group.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. -// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// * "" -// * "user" -// * "uid" -// * "user:group" -// * "uid:gid -// * "user:gid" -// * "uid:group" -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - var ( - userArg, groupArg string - name string - ) - - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. - if user.Sgids == nil { - user.Sgids = []int{} - } - - // allow for userArg to have either "user" syntax, or optionally "user:group" syntax - parseLine(userSpec, &userArg, &groupArg) - - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - return u.Uid == user.Uid - } - return u.Name == userArg || strconv.Itoa(u.Uid) == userArg - }) - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) - } - - haveUser := users != nil && len(users) > 0 - if haveUser { - // if we found any user entries that matched our filter, let's take the first one as "correct" - name = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // we asked for a user but didn't find them... let's check to see if we wanted a numeric user - user.Uid, err = strconv.Atoi(userArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find user %v", userArg) - } - - // Must be inside valid uid range. - if user.Uid < minId || user.Uid > maxId { - return nil, ErrRange - } - - // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit - } - - if groupArg != "" || name != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // Explicit group format takes precedence. - if groupArg != "" { - return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg - } - - // Check if user is a member. - for _, u := range g.List { - if u == name { - return true - } - } - - return false - }) - if err != nil && group != nil { - return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) - } - - haveGroup := groups != nil && len(groups) > 0 - if groupArg != "" { - if haveGroup { - // if we found any group entries that matched our filter, let's take the first one as "correct" - user.Gid = groups[0].Gid - } else { - // we asked for a group but didn't find id... 
let's check to see if we wanted a numeric group - user.Gid, err = strconv.Atoi(groupArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find group %v", groupArg) - } - - // Ensure gid is inside gid range. - if user.Gid < minId || user.Gid > maxId { - return nil, ErrRange - } - - // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit - } - } else if haveGroup { - // If implicit group format, fill supplementary gids. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroupsPath looks up a list of groups by name or group id -// against the group file. If a group name cannot be found, an error will be -// returned. If a group id cannot be found, it will be returned as-is. -func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - groupReader, err := os.Open(groupPath) - if err != nil { - return nil, fmt.Errorf("Failed to open group file: %v", err) - } - defer groupReader.Close() - - groups, err := ParseGroupFilter(groupReader, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.Atoi(ag) - if err != nil { - return nil, fmt.Errorf("Unable to find group %s", ag) - } - // Ensure gid is inside gid range. 
- if gid < minId || gid > maxId { - return nil, ErrRange - } - gidMap[gid] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user_test.go deleted file mode 100644 index ffb0760e2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/docker/libcontainer/user/user_test.go +++ /dev/null @@ -1,443 +0,0 @@ -package user - -import ( - "fmt" - "io" - "io/ioutil" - "reflect" - "sort" - "strconv" - "strings" - "testing" -) - -func TestUserParseLine(t *testing.T) { - var ( - a, b string - c []string - d int - ) - - parseLine("", &a, &b) - if a != "" || b != "" { - t.Fatalf("a and b should be empty ('%v', '%v')", a, b) - } - - parseLine("a", &a, &b) - if a != "a" || b != "" { - t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) - } - - parseLine("bad boys:corny cows", &a, &b) - if a != "bad boys" || b != "corny cows" { - t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) - } - - parseLine("", &c) - if len(c) != 0 { - t.Fatalf("c should be empty (%#v)", c) - } - - parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) - if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { - t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) - } - - parseLine("::::::::::", &a, &b, &c) - if a != "" || b != "" || len(c) != 0 { - t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) - } - - parseLine("not a number", &d) - if d != 0 { - t.Fatalf("d should be 0 (%v)", d) - } - - parseLine("b:12:c", &a, &d, &b) - if a != "b" || b != "c" || d != 12 { - t.Fatalf("a should be 'b' and b should be 'c', and d should be 12 ('%v', '%v', %v)", a, b, d) - } -} - -func TestUserParsePasswd(t *testing.T) { - users, err := ParsePasswdFilter(strings.NewReader(` -root:x:0:0:root:/root:/bin/bash -adm:x:3:4:adm:/var/adm:/bin/false -this is just some garbage data -`), nil) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if len(users) != 3 { - t.Fatalf("Expected 3 users, got %v", len(users)) - } - if users[0].Uid != 0 || users[0].Name != "root" { - t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) - } - if users[1].Uid != 3 || users[1].Name != "adm" { - t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) - } -} - -func TestUserParseGroup(t *testing.T) { - groups, err := ParseGroupFilter(strings.NewReader(` -root:x:0:root -adm:x:4:root,adm,daemon -this is just some garbage data -`), nil) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if len(groups) != 3 { - t.Fatalf("Expected 3 groups, got %v", len(groups)) - } - if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { - t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) - } - if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { - t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) - } -} - -func TestValidGetExecUser(t *testing.T) { - const passwdContent = ` -root:x:0:0:root user:/root:/bin/bash 
-adm:x:42:43:adm:/var/adm:/bin/false -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - defaultExecUser := ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - } - - tests := []struct { - ref string - expected ExecUser - }{ - { - ref: "root", - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{0, 1234}, - Home: "/root", - }, - }, - { - ref: "adm", - expected: ExecUser{ - Uid: 42, - Gid: 43, - Sgids: []int{1234}, - Home: "/var/adm", - }, - }, - { - ref: "root:adm", - expected: ExecUser{ - Uid: 0, - Gid: 43, - Sgids: defaultExecUser.Sgids, - Home: "/root", - }, - }, - { - ref: "adm:1234", - expected: ExecUser{ - Uid: 42, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: "/var/adm", - }, - }, - { - ref: "42:1234", - expected: ExecUser{ - Uid: 42, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: "/var/adm", - }, - }, - { - ref: "1337:1234", - expected: ExecUser{ - Uid: 1337, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - { - ref: "1337", - expected: ExecUser{ - Uid: 1337, - Gid: defaultExecUser.Gid, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - { - ref: "", - expected: ExecUser{ - Uid: defaultExecUser.Uid, - Gid: defaultExecUser.Gid, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - } - - for _, test := range tests { - passwd := strings.NewReader(passwdContent) - group := strings.NewReader(groupContent) - - execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) - if err != nil { - t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) - t.Fail() - continue - } - - if !reflect.DeepEqual(test.expected, *execUser) { - t.Logf("got: %#v", execUser) - t.Logf("expected: %#v", test.expected) - t.Fail() - continue - } - } -} - -func TestInvalidGetExecUser(t *testing.T) { - const passwdContent = ` -root:x:0:0:root user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - - tests := []string{ - // No such user/group. - "notuser", - "notuser:notgroup", - "root:notgroup", - "notuser:adm", - "8888:notgroup", - "notuser:8888", - - // Invalid user/group values. 
- "-1:0", - "0:-3", - "-5:-2", - } - - for _, test := range tests { - passwd := strings.NewReader(passwdContent) - group := strings.NewReader(groupContent) - - execUser, err := GetExecUser(test, nil, passwd, group) - if err == nil { - t.Logf("got unexpected success when parsing '%s': %#v", test, execUser) - t.Fail() - continue - } - } -} - -func TestGetExecUserNilSources(t *testing.T) { - const passwdContent = ` -root:x:0:0:root user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - - defaultExecUser := ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - } - - tests := []struct { - ref string - passwd, group bool - expected ExecUser - }{ - { - ref: "", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - { - ref: "root", - passwd: true, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{8888}, - Home: "/root", - }, - }, - { - ref: "0", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - { - ref: "0:0", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - } - - for _, test := range tests { - var passwd, group io.Reader - - if test.passwd { - passwd = strings.NewReader(passwdContent) - } - - if test.group { - group = strings.NewReader(groupContent) - } - - execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) - if err != nil { - t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) - t.Fail() - continue - } - - if !reflect.DeepEqual(test.expected, *execUser) { - t.Logf("got: %#v", execUser) - t.Logf("expected: %#v", test.expected) - t.Fail() - continue - } - } -} - -func TestGetAdditionalGroupsPath(t *testing.T) { - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -adm:x:4343:root,adm-duplicate -this is just some garbage data -` - tests := []struct { - groups []string - expected []int - hasError bool - }{ - { - // empty group - groups: []string{}, - expected: []int{}, - }, - { - // single group - groups: []string{"adm"}, - expected: []int{43}, - }, - { - // multiple groups - groups: []string{"adm", "grp"}, - expected: []int{43, 1234}, - }, - { - // invalid group - groups: []string{"adm", "grp", "not-exist"}, - expected: nil, - hasError: true, - }, - { - // group with numeric id - groups: []string{"43"}, - expected: []int{43}, - }, - { - // group with unknown numeric id - groups: []string{"adm", "10001"}, - expected: []int{43, 10001}, - }, - { - // groups specified twice with numeric and name - groups: []string{"adm", "43"}, - expected: []int{43}, - }, - { - // groups with too small id - groups: []string{"-1"}, - expected: nil, - hasError: true, - }, - { - // groups with too large id - groups: []string{strconv.Itoa(1 << 31)}, - expected: nil, - hasError: true, - }, - } - - for _, test := range tests { - tmpFile, err := ioutil.TempFile("", "get-additional-groups-path") - if err != nil { - t.Error(err) - } - fmt.Fprint(tmpFile, groupContent) - tmpFile.Close() - - gids, err := GetAdditionalGroupsPath(test.groups, tmpFile.Name()) - if test.hasError && err == nil { - t.Errorf("Parse(%#v) expects error but has none", test) - continue - } - if !test.hasError && err != nil { - t.Errorf("Parse(%#v) has error %v", test, err) - 
continue - } - sort.Sort(sort.IntSlice(gids)) - if !reflect.DeepEqual(gids, test.expected) { - t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb8728..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md deleted file mode 100644 index c60a31b05..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/README.md +++ /dev/null @@ -1,7 +0,0 @@ -context -======= -[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) - -gorilla/context is a general purpose registry for global request variables. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go deleted file mode 100644 index 81cb128b1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.RWMutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val - mutex.Unlock() -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.RLock() - if ctx := data[r]; ctx != nil { - value := ctx[key] - mutex.RUnlock() - return value - } - mutex.RUnlock() - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.RLock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - mutex.RUnlock() - return value, ok - } - mutex.RUnlock() - return nil, false -} - -// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. -func GetAll(r *http.Request) map[interface{}]interface{} { - mutex.RLock() - if context, ok := data[r]; ok { - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result - } - mutex.RUnlock() - return nil -} - -// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if -// the request was registered. -func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { - mutex.RLock() - context, ok := data[r] - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result, ok -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - if data[r] != nil { - delete(data[r], key) - } - mutex.Unlock() -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - clear(r) - mutex.Unlock() -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. -// -// If maxAge <= 0, all request data is removed. -// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. -func Purge(maxAge int) int { - mutex.Lock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - mutex.Unlock() - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. 
-func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go deleted file mode 100644 index 9814c501e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/context_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "testing" -) - -type keyType int - -const ( - key1 keyType = iota - key2 -) - -func TestContext(t *testing.T) { - assertEqual := func(val interface{}, exp interface{}) { - if val != exp { - t.Errorf("Expected %v, got %v.", exp, val) - } - } - - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - - // Get() - assertEqual(Get(r, key1), nil) - - // Set() - Set(r, key1, "1") - assertEqual(Get(r, key1), "1") - assertEqual(len(data[r]), 1) - - Set(r, key2, "2") - assertEqual(Get(r, key2), "2") - assertEqual(len(data[r]), 2) - - //GetOk - value, ok := GetOk(r, key1) - assertEqual(value, "1") - assertEqual(ok, true) - - value, ok = GetOk(r, "not exists") - assertEqual(value, nil) - assertEqual(ok, false) - - Set(r, "nil value", nil) - value, ok = GetOk(r, "nil value") - assertEqual(value, nil) - assertEqual(ok, true) - - // GetAll() - values := GetAll(r) - assertEqual(len(values), 3) - - // GetAll() for empty request - values = GetAll(emptyR) - if values != nil { - t.Error("GetAll didn't return nil value for invalid request") - } - - // GetAllOk() - values, ok = GetAllOk(r) - assertEqual(len(values), 3) - assertEqual(ok, true) - - // GetAllOk() for empty request - values, ok = GetAllOk(emptyR) - assertEqual(value, nil) - assertEqual(ok, false) - - // Delete() - Delete(r, key1) - assertEqual(Get(r, key1), nil) - assertEqual(len(data[r]), 2) - - Delete(r, key2) - assertEqual(Get(r, key2), nil) - assertEqual(len(data[r]), 1) - - // Clear() - Clear(r) - assertEqual(len(data), 0) -} - -func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Get(r, key) - } - done <- struct{}{} - -} - -func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Set(r, key, value) - } - done <- struct{}{} - -} - -func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { - - b.StopTimer() - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - done := make(chan struct{}) - b.StartTimer() - - for i := 0; i < b.N; i++ { - wait := make(chan struct{}) - - for i := 0; i < numReaders; i++ { - go parallelReader(r, "test", iterations, wait, done) - } - - for i := 0; i < numWriters; i++ { - go parallelWriter(r, "test", "123", iterations, wait, done) - } - - close(wait) - - for i := 0; i < numReaders+numWriters; i++ { - <-done - } - - } - -} - -func BenchmarkMutexSameReadWrite1(b *testing.B) { - benchmarkMutex(b, 1, 1, 32) -} -func BenchmarkMutexSameReadWrite2(b *testing.B) { - benchmarkMutex(b, 2, 2, 32) -} -func BenchmarkMutexSameReadWrite4(b *testing.B) { - 
benchmarkMutex(b, 4, 4, 32) -} -func BenchmarkMutex1(b *testing.B) { - benchmarkMutex(b, 2, 8, 32) -} -func BenchmarkMutex2(b *testing.B) { - benchmarkMutex(b, 16, 4, 64) -} -func BenchmarkMutex3(b *testing.B) { - benchmarkMutex(b, 1, 2, 128) -} -func BenchmarkMutex4(b *testing.B) { - benchmarkMutex(b, 128, 32, 256) -} -func BenchmarkMutex5(b *testing.B) { - benchmarkMutex(b, 1024, 2048, 64) -} -func BenchmarkMutex6(b *testing.B) { - benchmarkMutex(b, 2048, 1024, 512) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go deleted file mode 100644 index 73c740031..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. - -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. 
-*/ -package context diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 0e5fb8728..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md deleted file mode 100644 index 9a046ff97..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,235 +0,0 @@ -mux -=== -[![GoDoc](https://godoc.org/github.com/gorilla/securecookie?status.svg)](https://godoc.org/github.com/gorilla/securecookie) -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. 
- * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is `www.example.com`. Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -`www.example.com`, because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. 
When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -## Full Example - -Here's a complete, runnable example of a small mux based server: - -```go -package main - -import ( - "net/http" - - "github.com/gorilla/mux" -) - -func YourHandler(w http.ResponseWriter, r *http.Request) { - w.Write([]byte("Gorilla!\n")) -} - -func main() { - r := mux.NewRouter() - // Routes consist of a path and a handler function. - r.HandleFunc("/", YourHandler) - - // Bind to a port and pass our router in - http.ListenAndServe(":8000", r) -} -``` - -## License - -BSD licensed. See the LICENSE file for details. 
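For reference, a minimal, runnable sketch of the host-matched subrouter and URL "reversing" described in the README deleted above. It assumes the upstream import path github.com/gorilla/mux rather than the vendored path being removed in this patch, and the handler name, hostname pattern, and port are illustrative only.

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

// ArticleHandler echoes the route variables extracted by mux.Vars.
func ArticleHandler(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	fmt.Fprintf(w, "category=%s id=%s\n", vars["category"], vars["id"])
}

func main() {
	r := mux.NewRouter()

	// Subrouter: these routes are only tested when the Host header matches,
	// and the subdomain is captured as a route variable.
	s := r.Host("{subdomain:[a-z]+}.example.com").Subrouter()
	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
		Name("article")

	// Reverse the named route into a URL; host variables are filled in too.
	u, err := r.Get("article").URL(
		"subdomain", "news",
		"category", "technology",
		"id", "42",
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("reversed: %s", u) // http://news.example.com/articles/technology/42

	log.Fatal(http.ListenAndServe(":8000", r))
}
```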
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go deleted file mode 100644 index c5f97b2b2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/bench_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "net/http" - "testing" -) - -func BenchmarkMux(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1}", handler) - - request, _ := http.NewRequest("GET", "/v1/anything", nil) - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, request) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go deleted file mode 100644 index 49798cb5c..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. 
For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.example.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.example.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. 
For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -*/ -package mux diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go deleted file mode 100644 index b32e1a051..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,469 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "path" - "regexp" - - "github.com/fsouza/go-dockerclient/external/github.com/gorilla/context" -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - // Parent route, if this is a subrouter. - parent parentRoute - // Routes to be matched, in order. - routes []*Route - // Routes by name for URL building. 
- namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // If true, do not clear the request context after handling the request - KeepContext bool -} - -// Match matches registered routes against the request. -func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - return true - } - } - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { - - // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) - } - if handler == nil { - handler = r.NotFoundHandler - if handler == nil { - handler = http.NotFoundHandler() - } - } - if !r.KeepContext { - defer context.Clear(req) - } - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting. -func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes -} - -// getRegexpGroup returns regexp definitions from the parent route, if any. 
-func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil -} - -func (r *Router) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - return m -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. -func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash} - r.routes = append(r.routes, route) - return route -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). -func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. -// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// BuildVars registers a new route with a custom function for modifying -// route variables before building a URL. -func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { - return r.NewRoute().BuildVarsFunc(f) -} - -// Walk walks the router and all its sub-routers, calling walkFn for each route -// in the tree. The routes are walked in the order they were added. Sub-routers -// are explored depth-first. -func (r *Router) Walk(walkFn WalkFunc) error { - return r.walk(walkFn, []*Route{}) -} - -// SkipRouter is used as a return value from WalkFuncs to indicate that the -// router that walk is about to descend down to should be skipped. -var SkipRouter = errors.New("skip this router") - -// WalkFunc is the type of the function called for each route visited by Walk. 
-// At every invocation, it is given the current route, and the current router, -// and a list of ancestor routes that lead to the current route. -type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { - continue - } - - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. -func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -// This only works when called inside the handler of the matched route -// because the matched route is stored in the request context which is cleared -// after the handler returns, unless the KeepContext option is set on the -// Router. -func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) -} - -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -func checkPairs(pairs ...string) (int, error) { - length := len(pairs) - if length%2 != 0 { - return length, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - return length, nil -} - -// mapFromPairs converts variadic string parameters to a string map. -func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) 
- if err != nil { - return nil, err - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]*regexp.Regexp, length/2) - for i := 0; i < length; i += 2 { - regex, err := regexp.Compile(pairs[i+1]) - if err != nil { - return nil, err - } - m[pairs[i]] = regex - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMapWithString returns true if the given key/value pairs exist in a given map. -func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against -// the given regex -func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != nil { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v.MatchString(value) { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go deleted file mode 100644 index 74cb98b83..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/mux_test.go +++ /dev/null @@ -1,1334 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "fmt" - "net/http" - "strings" - "testing" - - "github.com/fsouza/go-dockerclient/external/github.com/gorilla/context" -) - -func (r *Route) GoString() string { - matchers := make([]string, len(r.matchers)) - for i, m := range r.matchers { - matchers[i] = fmt.Sprintf("%#v", m) - } - return fmt.Sprintf("&Route{matchers:[]matcher{%s}}", strings.Join(matchers, ", ")) -} - -func (r *routeRegexp) GoString() string { - return fmt.Sprintf("&routeRegexp{template: %q, matchHost: %t, matchQuery: %t, strictSlash: %t, regexp: regexp.MustCompile(%q), reverse: %q, varsN: %v, varsR: %v", r.template, r.matchHost, r.matchQuery, r.strictSlash, r.regexp.String(), r.reverse, r.varsN, r.varsR) -} - -type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - host string // the expected host of the match - path string // the expected path of the match - shouldMatch bool // whether the request is expected to match the route at all - shouldRedirect bool // whether the request should result in a redirect -} - -func TestHost(t *testing.T) { - // newRequestHost a new request with a method, url, and host header - newRequestHost := func(method, url, host string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - req.Host = host - return req - } - - tests := []routeTest{ - { - title: "Host route match", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with port, match", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: true, - }, - { - title: "Host route with port, wrong port in request URL", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route, match with host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, - { - title: "Host route with port, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - 
request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with pattern, additional capturing group, match", - route: new(Route).Host("aaa.{v1:[a-z]{2}(b|c)}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with pattern, wrong host in request URL", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with multiple patterns, wrong host in request URL", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with hyphenated name and pattern, match", - route: new(Route).Host("aaa.{v-1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v-1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with hyphenated name and pattern, additional capturing group, match", - route: new(Route).Host("aaa.{v-1:[a-z]{2}(b|c)}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v-1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with multiple hyphenated names and patterns, match", - route: new(Route).Host("{v-1:[a-z]{3}}.{v-2:[a-z]{3}}.{v-3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v-1": "aaa", "v-2": "bbb", "v-3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Path route with single pattern with pipe, match", - route: new(Route).Path("/{category:a|b/c}"), - request: newRequest("GET", "http://localhost/a"), - vars: map[string]string{"category": "a"}, - host: "", - path: "/a", - shouldMatch: true, - }, - { - title: "Path route with single pattern with pipe, match", - route: new(Route).Path("/{category:a|b/c}"), - request: newRequest("GET", "http://localhost/b/c"), - vars: map[string]string{"category": "b/c"}, - host: "", - path: "/b/c", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, - host: "", - path: "/a/product_name/1", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/b/c/product_name/1"), - vars: map[string]string{"category": "b/c", "product": "product_name", "id": "1"}, - host: "", - path: 
"/b/c/product_name/1", - shouldMatch: true, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPath(t *testing.T) { - tests := []routeTest{ - { - title: "Path route, match", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route, match with trailing slash in request and path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - }, - { - title: "Path route, do not match with trailing slash in path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "Path route, do not match with trailing slash in request", - route: new(Route).Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: false, - }, - { - title: "Path route, wrong path in request in request URL", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with pattern, match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with pattern, URL in request does not match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with multiple patterns, match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns, URL in request does not match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|(b/c)}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, - host: "", - path: "/a/product_name/1", - shouldMatch: true, - }, - { - title: "Path route with hyphenated name and pattern, match", - route: new(Route).Path("/111/{v-1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v-1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with multiple hyphenated names and patterns, match", - route: new(Route).Path("/{v-1:[0-9]{3}}/{v-2:[0-9]{3}}/{v-3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v-1": "111", "v-2": "222", "v-3": "333"}, - host: "", - path: 
"/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with multiple hyphenated names and patterns with pipe, match", - route: new(Route).Path("/{product-category:a|(b/c)}/{product-name}/{product-id:[0-9]+}"), - request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"product-category": "a", "product-name": "product_name", "product-id": "1"}, - host: "", - path: "/a/product_name/1", - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPathPrefix(t *testing.T) { - tests := []routeTest{ - { - title: "PathPrefix route, match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - }, - { - title: "PathPrefix route, match substring", - route: new(Route).PathPrefix("/1"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/1", - shouldMatch: true, - }, - { - title: "PathPrefix route, URL prefix in request does not match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "PathPrefix route with pattern, match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with pattern, URL prefix in request does not match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - { - title: "PathPrefix route with multiple patterns, match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with multiple patterns, URL prefix in request does not match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHostPath(t *testing.T) { - tests := []routeTest{ - { - title: "Host and Path route, match", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Host and Path route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Host and Path route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with pattern, URL in request does not match", - route: 
new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Host and Path route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with multiple patterns, URL in request does not match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHeaders(t *testing.T) { - // newRequestHeaders creates a new request with a method, url, and headers - newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - for k, v := range headers { - req.Header.Add(k, v) - } - return req - } - - tests := []routeTest{ - { - title: "Headers route, match", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Headers route, bad header values", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Headers route, regex header values to match", - route: new(Route).Headers("foo", "ba[zr]"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Headers route, regex header values to match", - route: new(Route).HeadersRegexp("foo", "ba[zr]"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "baz"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - } - -} - -func TestMethods(t *testing.T) { - tests := []routeTest{ - { - title: "Methods route, match GET", - route: new(Route).Methods("GET", "POST"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, match POST", - route: new(Route).Methods("GET", "POST"), - request: newRequest("POST", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, bad method", - route: new(Route).Methods("GET", "POST"), - request: newRequest("PUT", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, 
test) - } -} - -func TestQueries(t *testing.T) { - tests := []routeTest{ - { - title: "Queries route, match", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string out of order", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, bad query", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with pattern, match", - route: new(Route).Queries("foo", "{v1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{"v1": "bar"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with multiple patterns, match", - route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v1": "10"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=a"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with regexp pattern with quantifier, match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=1"), - vars: map[string]string{"v1": "1"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, additional variable in query string, match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?bar=2&foo=1"), - vars: map[string]string{"v1": "1"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=12"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with regexp pattern with quantifier, additional capturing group", - route: new(Route).Queries("foo", "{v1:[0-9]{1}(a|b)}"), - request: newRequest("GET", "http://localhost?foo=1a"), - vars: map[string]string{"v1": "1a"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, additional variable in query string, regexp does not match", - route: 
new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=12"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with hyphenated name, match", - route: new(Route).Queries("foo", "{v-1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{"v-1": "bar"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with multiple hyphenated names, match", - route: new(Route).Queries("foo", "{v-1}", "baz", "{v-2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v-1": "bar", "v-2": "ding"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with hyphenate name and pattern, match", - route: new(Route).Queries("foo", "{v-1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v-1": "10"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with hyphenated name and pattern with quantifier, additional capturing group", - route: new(Route).Queries("foo", "{v-1:[0-9]{1}(a|b)}"), - request: newRequest("GET", "http://localhost?foo=1a"), - vars: map[string]string{"v-1": "1a"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with empty value, should match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with empty value and no parameter in request, should not match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with empty value and empty parameter in request, should match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost?foo="), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with overlapping value, should not match", - route: new(Route).Queries("foo", "bar"), - request: newRequest("GET", "http://localhost?foo=barfoo"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with no parameter in request, should not match", - route: new(Route).Queries("foo", "{bar}"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with empty parameter in request, should match", - route: new(Route).Queries("foo", "{bar}"), - request: newRequest("GET", "http://localhost?foo="), - vars: map[string]string{"foo": ""}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, bad submatch", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?fffoo=bar&baz=dingggg"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSchemes(t *testing.T) { - tests := []routeTest{ - // Schemes - { - title: "Schemes route, match https", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "https://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp", - route: new(Route).Schemes("https", "ftp"), - 
request: newRequest("GET", "ftp://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, bad scheme", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestMatcherFunc(t *testing.T) { - m := func(r *http.Request, m *RouteMatch) bool { - if r.URL.Host == "aaa.bbb.ccc" { - return true - } - return false - } - - tests := []routeTest{ - { - title: "MatchFunc route, match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.bbb.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "MatchFunc route, non-match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.222.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestBuildVarsFunc(t *testing.T) { - tests := []routeTest{ - { - title: "BuildVarsFunc set on route", - route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v1"] = "3" - vars["v2"] = "a" - return vars - }), - request: newRequest("GET", "http://localhost/111/2"), - path: "/111/3a", - shouldMatch: true, - }, - { - title: "BuildVarsFunc set on route and parent route", - route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v1"] = "2" - return vars - }).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v2"] = "b" - return vars - }), - request: newRequest("GET", "http://localhost/1/a"), - path: "/2/b", - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSubRouter(t *testing.T) { - subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() - subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() - - tests := []routeTest{ - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://aaa.google.com/bbb"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: true, - }, - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://111.google.com/111"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: false, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: true, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestNamedRoutes(t *testing.T) { - r1 := NewRouter() - r1.NewRoute().Name("a") - r1.NewRoute().Name("b") - r1.NewRoute().Name("c") - - r2 := r1.NewRoute().Subrouter() - r2.NewRoute().Name("d") - r2.NewRoute().Name("e") - r2.NewRoute().Name("f") - - r3 := r2.NewRoute().Subrouter() - r3.NewRoute().Name("g") - r3.NewRoute().Name("h") - r3.NewRoute().Name("i") - - if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { - 
t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) - } else if r1.Get("i") == nil { - t.Errorf("Subroute name not registered") - } -} - -func TestStrictSlash(t *testing.T) { - r := NewRouter() - r.StrictSlash(true) - - tests := []routeTest{ - { - title: "Redirect path without slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path with slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Redirect path with slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path without slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Propagate StrictSlash to subrouters", - route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), - request: newRequest("GET", "http://localhost/static/images"), - vars: map[string]string{}, - host: "", - path: "/static/images/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Ignore StrictSlash for path prefix", - route: r.NewRoute().PathPrefix("/static/"), - request: newRequest("GET", "http://localhost/static/logo.png"), - vars: map[string]string{}, - host: "", - path: "/static/", - shouldMatch: true, - shouldRedirect: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestWalkSingleDepth(t *testing.T) { - r0 := NewRouter() - r1 := NewRouter() - r2 := NewRouter() - - r0.Path("/g") - r0.Path("/o") - r0.Path("/d").Handler(r1) - r0.Path("/r").Handler(r2) - r0.Path("/a") - - r1.Path("/z") - r1.Path("/i") - r1.Path("/l") - r1.Path("/l") - - r2.Path("/i") - r2.Path("/l") - r2.Path("/l") - - paths := []string{"g", "o", "r", "i", "l", "l", "a"} - depths := []int{0, 0, 0, 1, 1, 1, 0} - i := 0 - err := r0.Walk(func(route *Route, router *Router, ancestors []*Route) error { - matcher := route.matchers[0].(*routeRegexp) - if matcher.template == "/d" { - return SkipRouter - } - if len(ancestors) != depths[i] { - t.Errorf(`Expected depth of %d at i = %d; got "%s"`, depths[i], i, len(ancestors)) - } - if matcher.template != "/"+paths[i] { - t.Errorf(`Expected "/%s" at i = %d; got "%s"`, paths[i], i, matcher.template) - } - i++ - return nil - }) - if err != nil { - panic(err) - } - if i != len(paths) { - t.Errorf("Expected %d routes, found %d", len(paths), i) - } -} - -func TestWalkNested(t *testing.T) { - router := NewRouter() - - g := router.Path("/g").Subrouter() - o := g.PathPrefix("/o").Subrouter() - r := o.PathPrefix("/r").Subrouter() - i := r.PathPrefix("/i").Subrouter() - l1 := i.PathPrefix("/l").Subrouter() - l2 := l1.PathPrefix("/l").Subrouter() - l2.Path("/a") - - paths := []string{"/g", "/g/o", "/g/o/r", "/g/o/r/i", "/g/o/r/i/l", "/g/o/r/i/l/l", "/g/o/r/i/l/l/a"} - idx := 0 - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - path := paths[idx] - tpl := route.regexp.path.template - if tpl != path { - t.Errorf(`Expected %s got %s`, path, tpl) - } - idx++ - return nil - }) - if 
err != nil { - panic(err) - } - if idx != len(paths) { - t.Errorf("Expected %d routes, found %d", len(paths), idx) - } -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -func getRouteTemplate(route *Route) string { - host, path := "none", "none" - if route.regexp != nil { - if route.regexp.host != nil { - host = route.regexp.host.template - } - if route.regexp.path != nil { - path = route.regexp.path.template - } - } - return fmt.Sprintf("Host: %v, Path: %v", host, path) -} - -func testRoute(t *testing.T, test routeTest) { - request := test.request - route := test.route - vars := test.vars - shouldMatch := test.shouldMatch - host := test.host - path := test.path - url := test.host + test.path - shouldRedirect := test.shouldRedirect - - var match RouteMatch - ok := route.Match(request, &match) - if ok != shouldMatch { - msg := "Should match" - if !shouldMatch { - msg = "Should not match" - } - t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) - return - } - if shouldMatch { - if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { - t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) - return - } - if host != "" { - u, _ := test.route.URLHost(mapToPairs(match.Vars)...) - if host != u.Host { - t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) - return - } - } - if path != "" { - u, _ := route.URLPath(mapToPairs(match.Vars)...) - if path != u.Path { - t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) - return - } - } - if url != "" { - u, _ := route.URL(mapToPairs(match.Vars)...) 
- if url != u.Host+u.Path { - t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) - return - } - } - if shouldRedirect && match.Handler == nil { - t.Errorf("(%v) Did not redirect", test.title) - return - } - if !shouldRedirect && match.Handler != nil { - t.Errorf("(%v) Unexpected redirect", test.title) - return - } - } -} - -// Tests that the context is cleared or not cleared properly depending on -// the configuration of the router -func TestKeepContext(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - res := new(http.ResponseWriter) - r.ServeHTTP(*res, req) - - if _, ok := context.GetOk(req, "t"); ok { - t.Error("Context should have been cleared at end of request") - } - - r.KeepContext = true - - req, _ = http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - r.ServeHTTP(*res, req) - if _, ok := context.GetOk(req, "t"); !ok { - t.Error("Context should NOT have been cleared at end of request") - } - -} - -type TestA301ResponseWriter struct { - hh http.Header - status int -} - -func (ho TestA301ResponseWriter) Header() http.Header { - return http.Header(ho.hh) -} - -func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { - return 0, nil -} - -func (ho TestA301ResponseWriter) WriteHeader(code int) { - ho.status = code -} - -func Test301Redirect(t *testing.T) { - m := make(http.Header) - - func1 := func(w http.ResponseWriter, r *http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - - res := TestA301ResponseWriter{ - hh: m, - status: 0, - } - r.ServeHTTP(&res, req) - - if "http://localhost/api/?abc=def" != res.hh["Location"][0] { - t.Errorf("Should have complete URL with query string") - } -} - -// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW -func TestSubrouterHeader(t *testing.T) { - expected := "func1 response" - func1 := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, expected) - } - func2 := func(http.ResponseWriter, *http.Request) {} - - r := NewRouter() - s := r.Headers("SomeSpecialHeader", "").Subrouter() - s.HandleFunc("/", func1).Name("func1") - r.HandleFunc("/", func2).Name("func2") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - req.Header.Add("SomeSpecialHeader", "foo") - match := new(RouteMatch) - matched := r.Match(req, match) - if !matched { - t.Errorf("Should match request") - } - if match.Route.GetName() != "func1" { - t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) - } - resp := NewRecorder() - match.Handler.ServeHTTP(resp, req) - if resp.Body.String() != expected { - t.Errorf("Expecting %q", expected) - } -} - -// mapToPairs converts a string map to a slice of string pairs -func mapToPairs(m map[string]string) []string { - var i int - p := make([]string, len(m)*2) - for k, v := range m { - p[i] = k - p[i+1] = v - i += 2 - } - return p -} - -// stringMapEqual checks the equality of two string maps -func stringMapEqual(m1, m2 map[string]string) bool { - nil1 := m1 == nil - nil2 := m2 == nil - if nil1 != nil2 || len(m1) != len(m2) { - return false - } - for k, v := range m1 { - if v != m2[k] { - return false - } - } - return true 
-} - -// newRequest is a helper function to create a new request with a method and url -func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - return req -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go deleted file mode 100644 index 755db483e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/old_test.go +++ /dev/null @@ -1,714 +0,0 @@ -// Old tests ported to Go1. This is a mess. Want to drop it one day. - -// Copyright 2011 Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "net/http" - "testing" -) - -// ---------------------------------------------------------------------------- -// ResponseRecorder -// ---------------------------------------------------------------------------- -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - } -} - -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. -const DefaultRemoteAddr = "1.2.3.4" - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - return rw.HeaderMap -} - -// Write always succeeds and writes to rw.Body, if not nil. -func (rw *ResponseRecorder) Write(buf []byte) (int, error) { - if rw.Body != nil { - rw.Body.Write(buf) - } - if rw.Code == 0 { - rw.Code = http.StatusOK - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - rw.Code = code -} - -// Flush sets rw.Flushed to true. -func (rw *ResponseRecorder) Flush() { - rw.Flushed = true -} - -// ---------------------------------------------------------------------------- - -func TestRouteMatchers(t *testing.T) { - var scheme, host, path, query, method string - var headers map[string]string - var resultVars map[bool]map[string]string - - router := NewRouter() - router.NewRoute().Host("{var1}.google.com"). - Path("/{var2:[a-z]+}/{var3:[0-9]+}"). - Queries("foo", "bar"). - Methods("GET"). - Schemes("https"). - Headers("x-requested-with", "XMLHttpRequest") - router.NewRoute().Host("www.{var4}.com"). - PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). - Queries("baz", "ding"). - Methods("POST"). - Schemes("http"). - Headers("Content-Type", "application/json") - - reset := func() { - // Everything match. 
- scheme = "https" - host = "www.google.com" - path = "/product/42" - query = "?foo=bar" - method = "GET" - headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} - resultVars = map[bool]map[string]string{ - true: {"var1": "www", "var2": "product", "var3": "42"}, - false: {}, - } - } - - reset2 := func() { - // Everything match. - scheme = "http" - host = "www.google.com" - path = "/foo/product/42/path/that/is/ignored" - query = "?baz=ding" - method = "POST" - headers = map[string]string{"Content-Type": "application/json"} - resultVars = map[bool]map[string]string{ - true: {"var4": "google", "var5": "product", "var6": "42"}, - false: {}, - } - } - - match := func(shouldMatch bool) { - url := scheme + "://" + host + path + query - request, _ := http.NewRequest(method, url, nil) - for key, value := range headers { - request.Header.Add(key, value) - } - - var routeMatch RouteMatch - matched := router.Match(request, &routeMatch) - if matched != shouldMatch { - // Need better messages. :) - if matched { - t.Errorf("Should match.") - } else { - t.Errorf("Should not match.") - } - } - - if matched { - currentRoute := routeMatch.Route - if currentRoute == nil { - t.Errorf("Expected a current route.") - } - vars := routeMatch.Vars - expectedVars := resultVars[shouldMatch] - if len(vars) != len(expectedVars) { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - for name, value := range vars { - if expectedVars[name] != value { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - } - } - } - - // 1st route -------------------------------------------------------------- - - // Everything match. - reset() - match(true) - - // Scheme doesn't match. - reset() - scheme = "http" - match(false) - - // Host doesn't match. - reset() - host = "www.mygoogle.com" - match(false) - - // Path doesn't match. - reset() - path = "/product/notdigits" - match(false) - - // Query doesn't match. - reset() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset() - method = "POST" - match(false) - - // Header doesn't match. - reset() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset() - match(true) - - // 2nd route -------------------------------------------------------------- - - // Everything match. - reset2() - match(true) - - // Scheme doesn't match. - reset2() - scheme = "https" - match(false) - - // Host doesn't match. - reset2() - host = "sub.google.com" - match(false) - - // Path doesn't match. - reset2() - path = "/bar/product/42" - match(false) - - // Query doesn't match. - reset2() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset2() - method = "GET" - match(false) - - // Header doesn't match. - reset2() - headers = map[string]string{} - match(false) - - // Everything match, again. 
- reset2() - match(true) -} - -type headerMatcherTest struct { - matcher headerMatcher - headers map[string]string - result bool -} - -var headerMatcherTests = []headerMatcherTest{ - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": ""}), - headers: map[string]string{"X-Requested-With": "anything"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{}, - result: false, - }, -} - -type hostMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var hostMatcherTests = []hostMatcherTest{ - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://abc.def.ghi/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://a.b.c/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: false, - }, -} - -type methodMatcherTest struct { - matcher methodMatcher - method string - result bool -} - -var methodMatcherTests = []methodMatcherTest{ - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "GET", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "POST", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "PUT", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "DELETE", - result: false, - }, -} - -type pathMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var pathMatcherTests = []pathMatcherTest{ - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/123/456/789", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/1/2/3", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: false, - }, -} - -type schemeMatcherTest struct { - matcher schemeMatcher - url string - result bool -} - -var schemeMatcherTests = []schemeMatcherTest{ - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "http://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "https://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"https"}), - url: "http://localhost:8080/", - result: false, - }, - { - matcher: schemeMatcher([]string{"http"}), - url: "https://localhost:8080/", - result: false, - }, -} - -type urlBuildingTest struct { - route *Route - vars []string - url string -} - -var urlBuildingTests = []urlBuildingTest{ - { - route: new(Route).Host("foo.domain.com"), - vars: []string{}, - url: "http://foo.domain.com", - }, - { - route: new(Route).Host("{subdomain}.domain.com"), - vars: []string{"subdomain", "bar"}, - url: "http://bar.domain.com", - }, - { - route: new(Route).Host("foo.domain.com").Path("/articles"), - vars: []string{}, - url: "http://foo.domain.com/articles", - }, - { - route: 
new(Route).Path("/articles"), - vars: []string{}, - url: "/articles", - }, - { - route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"category", "technology", "id", "42"}, - url: "/articles/technology/42", - }, - { - route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, - url: "http://foo.domain.com/articles/technology/42", - }, -} - -func TestHeaderMatcher(t *testing.T) { - for _, v := range headerMatcherTests { - request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - for key, value := range v.headers { - request.Header.Add(key, value) - } - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, request.Header) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, request.Header) - } - } - } -} - -func TestHostMatcher(t *testing.T) { - for _, v := range hostMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestMethodMatcher(t *testing.T) { - for _, v := range methodMatcherTests { - request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.method) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.method) - } - } - } -} - -func TestPathMatcher(t *testing.T) { - for _, v := range pathMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestSchemeMatcher(t *testing.T) { - for _, v := range schemeMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - -func TestUrlBuilding(t *testing.T) { - - for _, v := range urlBuildingTests { - u, _ := 
v.route.URL(v.vars...) - url := u.String() - if url != v.url { - t.Errorf("expected %v, got %v", v.url, url) - /* - reversePath := "" - reverseHost := "" - if v.route.pathTemplate != nil { - reversePath = v.route.pathTemplate.Reverse - } - if v.route.hostTemplate != nil { - reverseHost = v.route.hostTemplate.Reverse - } - - t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) - */ - } - } - - ArticleHandler := func(w http.ResponseWriter, r *http.Request) { - } - - router := NewRouter() - router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") - - url, _ := router.Get("article").URL("category", "technology", "id", "42") - expected := "/articles/technology/42" - if url.String() != expected { - t.Errorf("Expected %v, got %v", expected, url.String()) - } -} - -func TestMatchedRouteName(t *testing.T) { - routeName := "stock" - router := NewRouter() - route := router.NewRoute().Path("/products/").Name(routeName) - - url := "http://www.example.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - retName := rv.Route.GetName() - if retName != routeName { - t.Errorf("Expected %q, got %q.", routeName, retName) - } -} - -func TestSubRouting(t *testing.T) { - // Example from docs. - router := NewRouter() - subrouter := router.NewRoute().Host("www.example.com").Subrouter() - route := subrouter.NewRoute().Path("/products/").Name("products") - - url := "http://www.example.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - u, _ := router.Get("products").URL() - builtUrl := u.String() - // Yay, subroute aware of the domain when building! 
- if builtUrl != url { - t.Errorf("Expected %q, got %q.", url, builtUrl) - } -} - -func TestVariableNames(t *testing.T) { - route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") - if route.err == nil { - t.Errorf("Expected error for duplicated variable names") - } -} - -func TestRedirectSlash(t *testing.T) { - var route *Route - var routeMatch RouteMatch - r := NewRouter() - - r.StrictSlash(false) - route = r.NewRoute() - if route.strictSlash != false { - t.Errorf("Expected false redirectSlash.") - } - - r.StrictSlash(true) - route = r.NewRoute() - if route.strictSlash != true { - t.Errorf("Expected true redirectSlash.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}/") - request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars := routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp := NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { - t.Errorf("Expected redirect header.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}") - request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars = routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp = NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { - t.Errorf("Expected redirect header.") - } -} - -// Test for the new regexp library, still not available in stable Go. 
-func TestNewRegexp(t *testing.T) { - var p *routeRegexp - var matches []string - - tests := map[string]map[string][]string{ - "/{foo:a{2}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": nil, - "/aaaa": nil, - }, - "/{foo:a{2,}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": {"aaaa"}, - }, - "/{foo:a{2,3}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": nil, - }, - "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abcd": nil, - "/abc/ab": {"abc", "ab"}, - "/abc/abc": nil, - "/abcd/ab": nil, - }, - `/{foo:\w{3,}}/{bar:\d{2,}}`: { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abc/1": nil, - "/abc/12": {"abc", "12"}, - "/abcd/12": {"abcd", "12"}, - "/abcd/123": {"abcd", "123"}, - }, - } - - for pattern, paths := range tests { - p, _ = newRouteRegexp(pattern, false, false, false, false) - for path, result := range paths { - matches = p.regexp.FindStringSubmatch(path) - if result == nil { - if matches != nil { - t.Errorf("%v should not match %v.", pattern, path) - } - } else { - if len(matches) != len(result)+1 { - t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) - } else { - for k, v := range result { - if matches[k+1] != v { - t.Errorf("Expected %v, got %v.", v, matches[k+1]) - } - } - } - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go deleted file mode 100644 index 06728dd54..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,317 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strconv" - "strings" -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if matchQuery { - defaultPattern = "[^?&]*" - } else if matchHost { - defaultPattern = "[^.]+" - matchPrefix = false - } - // Only match strict slash if not matching - if matchPrefix || matchHost || matchQuery { - strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - pattern.WriteByte('^') - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. 
- raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - varIdx := i / 2 - fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(varIdx), patt) - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - - // Append variable name and compiled pattern. - varsN[varIdx] = name - varsR[varIdx], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if strictSlash { - pattern.WriteString("[/]?") - } - if matchQuery { - // Add the default pattern if the query value is empty - if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { - pattern.WriteString(defaultPattern) - } - } - if !matchPrefix { - pattern.WriteByte('$') - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - // Done! - return &routeRegexp{ - template: template, - matchHost: matchHost, - matchQuery: matchQuery, - strictSlash: strictSlash, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // True for host match, false for path or query string match. - matchHost bool - // True for query string match, false for path and host match. - matchQuery bool - // The strictSlash value defined on the route, but disabled if PathPrefix was used. - strictSlash bool - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if !r.matchHost { - if r.matchQuery { - return r.matchQueryString(req) - } else { - return r.regexp.MatchString(req.URL.Path) - } - } - return r.regexp.MatchString(getHost(req)) -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// getUrlQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. 
-func (r *routeRegexp) getUrlQuery(req *http.Request) string { - if !r.matchQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - for key, vals := range req.URL.Query() { - if key == templateKey && len(vals) > 0 { - return key + "=" + vals[0] - } - } - return "" -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getUrlQuery(req)) -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. -func braceIndices(s string) ([]int, error) { - var level, idx int - idxs := make([]int, 0) - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// varGroupName builds a capturing group name for the indexed variable. -func varGroupName(idx int) string { - return "v" + strconv.Itoa(idx) -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - subexpNames := v.host.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[v.host.varsN[varName]] = hostVars[i+1] - varName++ - } - } - } - } - // Store path variables. - if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - subexpNames := v.path.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[v.path.varsN[varName]] = pathVars[i+1] - varName++ - } - } - // Check if we should redirect. - if v.path.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), 301) - } - } - } - } - // Store query string variables. - for _, q := range v.queries { - queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req)) - if queryVars != nil { - subexpNames := q.regexp.SubexpNames() - varName := 0 - for i, name := range subexpNames[1:] { - if name != "" && name == varGroupName(varName) { - m.Vars[q.varsN[varName]] = queryVars[i+1] - varName++ - } - } - } - } -} - -// getHost tries its best to return the request host. -func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - host := r.Host - // Slice off any port information. 
- if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host - -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go deleted file mode 100644 index 890130460..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,603 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute - // Request handler for the route. - handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. - err error - - buildVarsFunc BuildVarsFunc -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - return false - } - } - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - // Set variables. - if r.regexp != nil { - r.regexp.setMatch(req, match, r) - } - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. -func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// If the name was registered already it will be overwritten. 
-func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.getNamedRoutes()[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. -func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { - if r.err != nil { - return r.err - } - r.regexp = r.getRegexpGroup() - if !matchHost && !matchQuery { - if len(tpl) == 0 || tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) - if err != nil { - return err - } - for _, q := range r.regexp.queries { - if err = uniqueVars(rr.varsN, q.varsN); err != nil { - return err - } - } - if matchHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - if matchQuery { - r.regexp.queries = append(r.regexp.queries, rr) - } else { - r.regexp.path = rr - } - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. -type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithString(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// Alternatively, you can provide a regular expression and match the header as follows: -// -// r.Headers("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will the same as the previous example, with the addition of matching -// application/text as well. -// -// It the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairsToString(pairs...) - return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// headerRegexMatcher matches the request against the route given a regex for the header -type headerRegexMatcher map[string]*regexp.Regexp - -func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithRegex(m, r.Header, true) -} - -// Regular expressions can be used with headers as well. 
-// It accepts a sequence of key/value pairs, where the value has regex support. For example -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both the request header matches both regular expressions. -// It the value is an empty string, it will match any value if the key is set. -func (r *Route) HeadersRegexp(pairs ...string) *Route { - if r.err == nil { - var headers map[string]*regexp.Regexp - headers, r.err = mapFromPairsToRegex(pairs...) - return r.addMatcher(headerRegexMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.example.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, true, false, false) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. -// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". -func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, false, false) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. This matches if the given -// template is a prefix of the full URL path. 
See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, true, false) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. -// For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined queries -// values, e.g.: ?foo=bar&id=42. -// -// It the value is an empty string, it will match any value if the key is set. -// -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -func (r *Route) Queries(pairs ...string) *Route { - length := len(pairs) - if length%2 != 0 { - r.err = fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - return nil - } - for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil { - return r - } - } - - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". -func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// BuildVarsFunc -------------------------------------------------------------- - -// BuildVarsFunc is the function signature used by custom build variable -// functions (which can modify route variables before a route's URL is built). -type BuildVarsFunc func(map[string]string) map[string]string - -// BuildVarsFunc adds a custom function to be used to modify build variables -// before a route's URL is built. -func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { - r.buildVarsFunc = f - return r -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched. For example: -// -// r := mux.NewRouter() -// s := r.Host("www.example.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. -func (r *Route) Subrouter() *Router { - router := &Router{parent: r, strictSlash: r.strictSlash} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. 
-// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return an url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil { - return nil, errors.New("mux: route doesn't have a host or path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - var scheme, host, path string - if r.regexp.host != nil { - // Set a default scheme. - scheme = "http" - if host, err = r.regexp.host.url(values); err != nil { - return nil, err - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(values); err != nil { - return nil, err - } - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Scheme: "http", - Host: host, - }, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// prepareVars converts the route variable pairs into a map. If the route has a -// BuildVarsFunc, it is invoked. -func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { - m, err := mapFromPairsToString(pairs...) - if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// parentRoute allows routes to know about parent host and path definitions. 
-type parentRoute interface { - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup - buildVars(map[string]string) map[string]string -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - return r.parent.getNamedRoutes() -} - -// getRegexpGroup returns regexp definitions from this route. -func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. - r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - queries: regexp.queries, - } - } - } - return r.regexp -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS deleted file mode 100644 index edbe20066..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Aleksa Sarai (@cyphar) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go deleted file mode 100644 index 6f8a982ff..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup.go +++ /dev/null @@ -1,108 +0,0 @@ -package user - -import ( - "errors" - "fmt" - "syscall" -) - -var ( - // The current operating system does not provide the required data for user lookups. - ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") -) - -func lookupUser(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. - users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, fmt.Errorf("no matching entries in passwd file") - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(syscall.Getuid()) -} - -// LookupUser looks up a user by their username in /etc/passwd. If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUser(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupId -// returns an error. 
-func LookupUid(uid int) (User, error) { - return lookupUser(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupGroup(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the users. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No user entries found. - if len(groups) == 0 { - return Group{}, fmt.Errorf("no matching entries in group file") - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. -func CurrentGroup() (Group, error) { - return LookupGid(syscall.Getgid()) -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Gid == gid - }) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go deleted file mode 100644 index 758b734c2..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" -) - -// Unix-specific path to the passwd and group formatted files. 
-const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 721794887..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import "io" - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go deleted file mode 100644 index 964e31bfd..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user.go +++ /dev/null @@ -1,413 +0,0 @@ -package user - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minId = 0 - maxId = 1<<31 - 1 //for 32-bit systems compatibility -) - -var ( - ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -func parseLine(line string, v ...interface{}) { - if line == "" { - return - } - - parts := strings.Split(line, ":") - for i, p := range parts { - if len(v) <= i { - // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files - break - } - - switch e := v[i].(type) { - case *string: - // "root", "adm", "/bin/bash" - *e = p - case *int: - // "0", "4", "1000" - // ignore string to int conversion errors, for great "tolerance" of naughty configuration files - *e, _ = strconv.Atoi(p) - case *[]string: - // "", "root", "root,adm,daemon" - if p != "" { - *e = strings.Split(p, ",") - } else { - *e = []string{} - } - default: - // panic, because this is a programming/logic error, not a runtime one - panic("parseLine expects only pointers! 
argument " + strconv.Itoa(i) + " is not a pointer!") - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, fmt.Errorf("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine( - text, - &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, fmt.Errorf("nil source for group-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []Group{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := s.Text() - if text == "" { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine( - text, - &p.Name, &p.Pass, &p.Gid, &p.List, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -type ExecUser struct { - Uid, Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. -func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - passwd, err := os.Open(passwdPath) - if err != nil { - passwd = nil - } else { - defer passwd.Close() - } - - group, err := os.Open(groupPath) - if err != nil { - group = nil - } else { - defer group.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. 
-// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// * "" -// * "user" -// * "uid" -// * "user:group" -// * "uid:gid -// * "user:gid" -// * "uid:group" -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - var ( - userArg, groupArg string - name string - ) - - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. - if user.Sgids == nil { - user.Sgids = []int{} - } - - // allow for userArg to have either "user" syntax, or optionally "user:group" syntax - parseLine(userSpec, &userArg, &groupArg) - - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - return u.Uid == user.Uid - } - return u.Name == userArg || strconv.Itoa(u.Uid) == userArg - }) - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) - } - - haveUser := users != nil && len(users) > 0 - if haveUser { - // if we found any user entries that matched our filter, let's take the first one as "correct" - name = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // we asked for a user but didn't find them... let's check to see if we wanted a numeric user - user.Uid, err = strconv.Atoi(userArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find user %v", userArg) - } - - // Must be inside valid uid range. - if user.Uid < minId || user.Uid > maxId { - return nil, ErrRange - } - - // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit - } - - if groupArg != "" || name != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // Explicit group format takes precedence. - if groupArg != "" { - return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg - } - - // Check if user is a member. - for _, u := range g.List { - if u == name { - return true - } - } - - return false - }) - if err != nil && group != nil { - return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) - } - - haveGroup := groups != nil && len(groups) > 0 - if groupArg != "" { - if haveGroup { - // if we found any group entries that matched our filter, let's take the first one as "correct" - user.Gid = groups[0].Gid - } else { - // we asked for a group but didn't find id... let's check to see if we wanted a numeric group - user.Gid, err = strconv.Atoi(groupArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find group %v", groupArg) - } - - // Ensure gid is inside gid range. - if user.Gid < minId || user.Gid > maxId { - return nil, ErrRange - } - - // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit - } - } else if haveGroup { - // If implicit group format, fill supplementary gids. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroups looks up a list of groups by name or group id against -// against the given /etc/group formatted data. 
If a group name cannot be found, -// an error will be returned. If a group id cannot be found, it will be returned -// as-is. -func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { - groups, err := ParseGroupFilter(group, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.Atoi(ag) - if err != nil { - return nil, fmt.Errorf("Unable to find group %s", ag) - } - // Ensure gid is inside gid range. - if gid < minId || gid > maxId { - return nil, ErrRange - } - gidMap[gid] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} - -// Wrapper around GetAdditionalGroups that opens the groupPath given and gives -// it as an argument to GetAdditionalGroups. -func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - group, err := os.Open(groupPath) - if err != nil { - return nil, fmt.Errorf("Failed to open group file: %v", err) - } - defer group.Close() - return GetAdditionalGroups(additionalGroups, group) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go deleted file mode 100644 index 0e37ac3dd..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user/user_test.go +++ /dev/null @@ -1,436 +0,0 @@ -package user - -import ( - "io" - "reflect" - "sort" - "strconv" - "strings" - "testing" -) - -func TestUserParseLine(t *testing.T) { - var ( - a, b string - c []string - d int - ) - - parseLine("", &a, &b) - if a != "" || b != "" { - t.Fatalf("a and b should be empty ('%v', '%v')", a, b) - } - - parseLine("a", &a, &b) - if a != "a" || b != "" { - t.Fatalf("a should be 'a' and b should be empty ('%v', '%v')", a, b) - } - - parseLine("bad boys:corny cows", &a, &b) - if a != "bad boys" || b != "corny cows" { - t.Fatalf("a should be 'bad boys' and b should be 'corny cows' ('%v', '%v')", a, b) - } - - parseLine("", &c) - if len(c) != 0 { - t.Fatalf("c should be empty (%#v)", c) - } - - parseLine("d,e,f:g:h:i,j,k", &c, &a, &b, &c) - if a != "g" || b != "h" || len(c) != 3 || c[0] != "i" || c[1] != "j" || c[2] != "k" { - t.Fatalf("a should be 'g', b should be 'h', and c should be ['i','j','k'] ('%v', '%v', '%#v')", a, b, c) - } - - parseLine("::::::::::", &a, &b, &c) - if a != "" || b != "" || len(c) != 0 { - t.Fatalf("a, b, and c should all be empty ('%v', '%v', '%#v')", a, b, c) - } - - parseLine("not a number", &d) - if d != 0 { - t.Fatalf("d should be 0 (%v)", d) - } - - parseLine("b:12:c", &a, &d, &b) - if a != "b" || b != "c" || d != 12 { - t.Fatalf("a should be 'b' and b should be 'c', 
and d should be 12 ('%v', '%v', %v)", a, b, d) - } -} - -func TestUserParsePasswd(t *testing.T) { - users, err := ParsePasswdFilter(strings.NewReader(` -root:x:0:0:root:/root:/bin/bash -adm:x:3:4:adm:/var/adm:/bin/false -this is just some garbage data -`), nil) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if len(users) != 3 { - t.Fatalf("Expected 3 users, got %v", len(users)) - } - if users[0].Uid != 0 || users[0].Name != "root" { - t.Fatalf("Expected users[0] to be 0 - root, got %v - %v", users[0].Uid, users[0].Name) - } - if users[1].Uid != 3 || users[1].Name != "adm" { - t.Fatalf("Expected users[1] to be 3 - adm, got %v - %v", users[1].Uid, users[1].Name) - } -} - -func TestUserParseGroup(t *testing.T) { - groups, err := ParseGroupFilter(strings.NewReader(` -root:x:0:root -adm:x:4:root,adm,daemon -this is just some garbage data -`), nil) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - if len(groups) != 3 { - t.Fatalf("Expected 3 groups, got %v", len(groups)) - } - if groups[0].Gid != 0 || groups[0].Name != "root" || len(groups[0].List) != 1 { - t.Fatalf("Expected groups[0] to be 0 - root - 1 member, got %v - %v - %v", groups[0].Gid, groups[0].Name, len(groups[0].List)) - } - if groups[1].Gid != 4 || groups[1].Name != "adm" || len(groups[1].List) != 3 { - t.Fatalf("Expected groups[1] to be 4 - adm - 3 members, got %v - %v - %v", groups[1].Gid, groups[1].Name, len(groups[1].List)) - } -} - -func TestValidGetExecUser(t *testing.T) { - const passwdContent = ` -root:x:0:0:root user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - defaultExecUser := ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - } - - tests := []struct { - ref string - expected ExecUser - }{ - { - ref: "root", - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{0, 1234}, - Home: "/root", - }, - }, - { - ref: "adm", - expected: ExecUser{ - Uid: 42, - Gid: 43, - Sgids: []int{1234}, - Home: "/var/adm", - }, - }, - { - ref: "root:adm", - expected: ExecUser{ - Uid: 0, - Gid: 43, - Sgids: defaultExecUser.Sgids, - Home: "/root", - }, - }, - { - ref: "adm:1234", - expected: ExecUser{ - Uid: 42, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: "/var/adm", - }, - }, - { - ref: "42:1234", - expected: ExecUser{ - Uid: 42, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: "/var/adm", - }, - }, - { - ref: "1337:1234", - expected: ExecUser{ - Uid: 1337, - Gid: 1234, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - { - ref: "1337", - expected: ExecUser{ - Uid: 1337, - Gid: defaultExecUser.Gid, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - { - ref: "", - expected: ExecUser{ - Uid: defaultExecUser.Uid, - Gid: defaultExecUser.Gid, - Sgids: defaultExecUser.Sgids, - Home: defaultExecUser.Home, - }, - }, - } - - for _, test := range tests { - passwd := strings.NewReader(passwdContent) - group := strings.NewReader(groupContent) - - execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) - if err != nil { - t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) - t.Fail() - continue - } - - if !reflect.DeepEqual(test.expected, *execUser) { - t.Logf("got: %#v", execUser) - t.Logf("expected: %#v", test.expected) - t.Fail() - continue - } - } -} - -func TestInvalidGetExecUser(t *testing.T) { - const passwdContent = ` -root:x:0:0:root 
user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - - tests := []string{ - // No such user/group. - "notuser", - "notuser:notgroup", - "root:notgroup", - "notuser:adm", - "8888:notgroup", - "notuser:8888", - - // Invalid user/group values. - "-1:0", - "0:-3", - "-5:-2", - } - - for _, test := range tests { - passwd := strings.NewReader(passwdContent) - group := strings.NewReader(groupContent) - - execUser, err := GetExecUser(test, nil, passwd, group) - if err == nil { - t.Logf("got unexpected success when parsing '%s': %#v", test, execUser) - t.Fail() - continue - } - } -} - -func TestGetExecUserNilSources(t *testing.T) { - const passwdContent = ` -root:x:0:0:root user:/root:/bin/bash -adm:x:42:43:adm:/var/adm:/bin/false -this is just some garbage data -` - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -this is just some garbage data -` - - defaultExecUser := ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - } - - tests := []struct { - ref string - passwd, group bool - expected ExecUser - }{ - { - ref: "", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 8888, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - { - ref: "root", - passwd: true, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{8888}, - Home: "/root", - }, - }, - { - ref: "0", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 8888, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - { - ref: "0:0", - passwd: false, - group: false, - expected: ExecUser{ - Uid: 0, - Gid: 0, - Sgids: []int{8888}, - Home: "/8888", - }, - }, - } - - for _, test := range tests { - var passwd, group io.Reader - - if test.passwd { - passwd = strings.NewReader(passwdContent) - } - - if test.group { - group = strings.NewReader(groupContent) - } - - execUser, err := GetExecUser(test.ref, &defaultExecUser, passwd, group) - if err != nil { - t.Logf("got unexpected error when parsing '%s': %s", test.ref, err.Error()) - t.Fail() - continue - } - - if !reflect.DeepEqual(test.expected, *execUser) { - t.Logf("got: %#v", execUser) - t.Logf("expected: %#v", test.expected) - t.Fail() - continue - } - } -} - -func TestGetAdditionalGroups(t *testing.T) { - const groupContent = ` -root:x:0:root -adm:x:43: -grp:x:1234:root,adm -adm:x:4343:root,adm-duplicate -this is just some garbage data -` - tests := []struct { - groups []string - expected []int - hasError bool - }{ - { - // empty group - groups: []string{}, - expected: []int{}, - }, - { - // single group - groups: []string{"adm"}, - expected: []int{43}, - }, - { - // multiple groups - groups: []string{"adm", "grp"}, - expected: []int{43, 1234}, - }, - { - // invalid group - groups: []string{"adm", "grp", "not-exist"}, - expected: nil, - hasError: true, - }, - { - // group with numeric id - groups: []string{"43"}, - expected: []int{43}, - }, - { - // group with unknown numeric id - groups: []string{"adm", "10001"}, - expected: []int{43, 10001}, - }, - { - // groups specified twice with numeric and name - groups: []string{"adm", "43"}, - expected: []int{43}, - }, - { - // groups with too small id - groups: []string{"-1"}, - expected: nil, - hasError: true, - }, - { - // groups with too large id - groups: []string{strconv.Itoa(1 << 31)}, - expected: nil, - hasError: true, - }, - } - - for _, test := range tests { - group := 
strings.NewReader(groupContent) - - gids, err := GetAdditionalGroups(test.groups, group) - if test.hasError && err == nil { - t.Errorf("Parse(%#v) expects error but has none", test) - continue - } - if !test.hasError && err != nil { - t.Errorf("Parse(%#v) has error %v", test, err) - continue - } - sort.Sort(sort.IntSlice(gids)) - if !reflect.DeepEqual(gids, test.expected) { - t.Errorf("Gids(%v), expect %v from groups %v", gids, test.expected, test.groups) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go deleted file mode 100644 index f8be84690..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image.go +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "time" -) - -// APIImages represent an image returned in the ListImages call. -type APIImages struct { - ID string `json:"Id" yaml:"Id"` - RepoTags []string `json:"RepoTags,omitempty" yaml:"RepoTags,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` - VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"` - ParentID string `json:"ParentId,omitempty" yaml:"ParentId,omitempty"` - RepoDigests []string `json:"RepoDigests,omitempty" yaml:"RepoDigests,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// Image is the type representing a docker image and its various properties -type Image struct { - ID string `json:"Id" yaml:"Id"` - Parent string `json:"Parent,omitempty" yaml:"Parent,omitempty"` - Comment string `json:"Comment,omitempty" yaml:"Comment,omitempty"` - Created time.Time `json:"Created,omitempty" yaml:"Created,omitempty"` - Container string `json:"Container,omitempty" yaml:"Container,omitempty"` - ContainerConfig Config `json:"ContainerConfig,omitempty" yaml:"ContainerConfig,omitempty"` - DockerVersion string `json:"DockerVersion,omitempty" yaml:"DockerVersion,omitempty"` - Author string `json:"Author,omitempty" yaml:"Author,omitempty"` - Config *Config `json:"Config,omitempty" yaml:"Config,omitempty"` - Architecture string `json:"Architecture,omitempty" yaml:"Architecture,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` - VirtualSize int64 `json:"VirtualSize,omitempty" yaml:"VirtualSize,omitempty"` -} - -// ImagePre012 serves the same purpose as the Image type except that it is for -// earlier versions of the Docker API (pre-012 to be specific) -type ImagePre012 struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig Config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *Config `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - Size int64 `json:"size,omitempty"` -} - -var ( - // ErrNoSuchImage is the error returned when the image does not exist. 
- ErrNoSuchImage = errors.New("no such image") - - // ErrMissingRepo is the error returned when the remote repository is - // missing. - ErrMissingRepo = errors.New("missing remote repository e.g. 'github.com/user/repo'") - - // ErrMissingOutputStream is the error returned when no output stream - // is provided to some calls, like BuildImage. - ErrMissingOutputStream = errors.New("missing output stream") - - // ErrMultipleContexts is the error returned when both a ContextDir and - // InputStream are provided in BuildImageOptions - ErrMultipleContexts = errors.New("image build may not be provided BOTH context dir and input stream") - - // ErrMustSpecifyNames is the error rreturned when the Names field on - // ExportImagesOptions is nil or empty - ErrMustSpecifyNames = errors.New("must specify at least one name to export") -) - -// ListImagesOptions specify parameters to the ListImages function. -// -// See https://goo.gl/xBe1u3 for more details. -type ListImagesOptions struct { - All bool - Filters map[string][]string - Digests bool -} - -// ListImages returns the list of available images in the server. -// -// See https://goo.gl/xBe1u3 for more details. -func (c *Client) ListImages(opts ListImagesOptions) ([]APIImages, error) { - path := "/images/json?" + queryString(opts) - body, _, err := c.do("GET", path, doOptions{}) - if err != nil { - return nil, err - } - var images []APIImages - err = json.Unmarshal(body, &images) - if err != nil { - return nil, err - } - return images, nil -} - -// ImageHistory represent a layer in an image's history returned by the -// ImageHistory call. -type ImageHistory struct { - ID string `json:"Id" yaml:"Id"` - Tags []string `json:"Tags,omitempty" yaml:"Tags,omitempty"` - Created int64 `json:"Created,omitempty" yaml:"Created,omitempty"` - CreatedBy string `json:"CreatedBy,omitempty" yaml:"CreatedBy,omitempty"` - Size int64 `json:"Size,omitempty" yaml:"Size,omitempty"` -} - -// ImageHistory returns the history of the image by its name or ID. -// -// See https://goo.gl/8bnTId for more details. -func (c *Client) ImageHistory(name string) ([]ImageHistory, error) { - body, status, err := c.do("GET", "/images/"+name+"/history", doOptions{}) - if status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - if err != nil { - return nil, err - } - var history []ImageHistory - err = json.Unmarshal(body, &history) - if err != nil { - return nil, err - } - return history, nil -} - -// RemoveImage removes an image by its name or ID. -// -// See https://goo.gl/V3ZWnK for more details. -func (c *Client) RemoveImage(name string) error { - _, status, err := c.do("DELETE", "/images/"+name, doOptions{}) - if status == http.StatusNotFound { - return ErrNoSuchImage - } - return err -} - -// RemoveImageOptions present the set of options available for removing an image -// from a registry. -// -// See https://goo.gl/V3ZWnK for more details. -type RemoveImageOptions struct { - Force bool `qs:"force"` - NoPrune bool `qs:"noprune"` -} - -// RemoveImageExtended removes an image by its name or ID. -// Extra params can be passed, see RemoveImageOptions -// -// See https://goo.gl/V3ZWnK for more details. -func (c *Client) RemoveImageExtended(name string, opts RemoveImageOptions) error { - uri := fmt.Sprintf("/images/%s?%s", name, queryString(&opts)) - _, status, err := c.do("DELETE", uri, doOptions{}) - if status == http.StatusNotFound { - return ErrNoSuchImage - } - return err -} - -// InspectImage returns an image by its name or ID. 
-// -// See https://goo.gl/jHPcg6 for more details. -func (c *Client) InspectImage(name string) (*Image, error) { - body, status, err := c.do("GET", "/images/"+name+"/json", doOptions{}) - if status == http.StatusNotFound { - return nil, ErrNoSuchImage - } - if err != nil { - return nil, err - } - - var image Image - - // if the caller elected to skip checking the server's version, assume it's the latest - if c.SkipServerVersionCheck || c.expectedAPIVersion.GreaterThanOrEqualTo(apiVersion112) { - err = json.Unmarshal(body, &image) - if err != nil { - return nil, err - } - } else { - var imagePre012 ImagePre012 - err = json.Unmarshal(body, &imagePre012) - if err != nil { - return nil, err - } - - image.ID = imagePre012.ID - image.Parent = imagePre012.Parent - image.Comment = imagePre012.Comment - image.Created = imagePre012.Created - image.Container = imagePre012.Container - image.ContainerConfig = imagePre012.ContainerConfig - image.DockerVersion = imagePre012.DockerVersion - image.Author = imagePre012.Author - image.Config = imagePre012.Config - image.Architecture = imagePre012.Architecture - image.Size = imagePre012.Size - } - - return &image, nil -} - -// PushImageOptions represents options to use in the PushImage method. -// -// See https://goo.gl/zPtZaT for more details. -type PushImageOptions struct { - // Name of the image - Name string - - // Tag of the image - Tag string - - // Registry server to push the image - Registry string - - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` -} - -// PushImage pushes an image to a remote registry, logging progress to w. -// -// An empty instance of AuthConfiguration may be used for unauthenticated -// pushes. -// -// See https://goo.gl/zPtZaT for more details. -func (c *Client) PushImage(opts PushImageOptions, auth AuthConfiguration) error { - if opts.Name == "" { - return ErrNoSuchImage - } - headers, err := headersWithAuth(auth) - if err != nil { - return err - } - name := opts.Name - opts.Name = "" - path := "/images/" + name + "/push?" + queryString(&opts) - return c.stream("POST", path, streamOptions{ - setRawTerminal: true, - rawJSONStream: opts.RawJSONStream, - headers: headers, - stdout: opts.OutputStream, - }) -} - -// PullImageOptions present the set of options available for pulling an image -// from a registry. -// -// See https://goo.gl/iJkZjD for more details. -type PullImageOptions struct { - Repository string `qs:"fromImage"` - Registry string - Tag string - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` -} - -// PullImage pulls an image from a remote registry, logging progress to -// opts.OutputStream. -// -// See https://goo.gl/iJkZjD for more details. -func (c *Client) PullImage(opts PullImageOptions, auth AuthConfiguration) error { - if opts.Repository == "" { - return ErrNoSuchImage - } - - headers, err := headersWithAuth(auth) - if err != nil { - return err - } - return c.createImage(queryString(&opts), headers, nil, opts.OutputStream, opts.RawJSONStream) -} - -func (c *Client) createImage(qs string, headers map[string]string, in io.Reader, w io.Writer, rawJSONStream bool) error { - path := "/images/create?" + qs - return c.stream("POST", path, streamOptions{ - setRawTerminal: true, - rawJSONStream: rawJSONStream, - headers: headers, - in: in, - stdout: w, - }) -} - -// LoadImageOptions represents the options for LoadImage Docker API Call -// -// See https://goo.gl/JyClMX for more details. 
-type LoadImageOptions struct { - InputStream io.Reader -} - -// LoadImage imports a tarball docker image -// -// See https://goo.gl/JyClMX for more details. -func (c *Client) LoadImage(opts LoadImageOptions) error { - return c.stream("POST", "/images/load", streamOptions{ - setRawTerminal: true, - in: opts.InputStream, - }) -} - -// ExportImageOptions represent the options for ExportImage Docker API call. -// -// See https://goo.gl/le7vK8 for more details. -type ExportImageOptions struct { - Name string - OutputStream io.Writer -} - -// ExportImage exports an image (as a tar file) into the stream. -// -// See https://goo.gl/le7vK8 for more details. -func (c *Client) ExportImage(opts ExportImageOptions) error { - return c.stream("GET", fmt.Sprintf("/images/%s/get", opts.Name), streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// ExportImagesOptions represent the options for ExportImages Docker API call -// -// See https://goo.gl/huC7HA for more details. -type ExportImagesOptions struct { - Names []string - OutputStream io.Writer `qs:"-"` -} - -// ExportImages exports one or more images (as a tar file) into the stream -// -// See https://goo.gl/huC7HA for more details. -func (c *Client) ExportImages(opts ExportImagesOptions) error { - if opts.Names == nil || len(opts.Names) == 0 { - return ErrMustSpecifyNames - } - return c.stream("GET", "/images/get?"+queryString(&opts), streamOptions{ - setRawTerminal: true, - stdout: opts.OutputStream, - }) -} - -// ImportImageOptions present the set of informations available for importing -// an image from a source file or the stdin. -// -// See https://goo.gl/iJkZjD for more details. -type ImportImageOptions struct { - Repository string `qs:"repo"` - Source string `qs:"fromSrc"` - Tag string `qs:"tag"` - - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` -} - -// ImportImage imports an image from a url, a file or stdin -// -// See https://goo.gl/iJkZjD for more details. -func (c *Client) ImportImage(opts ImportImageOptions) error { - if opts.Repository == "" { - return ErrNoSuchImage - } - if opts.Source != "-" { - opts.InputStream = nil - } - if opts.Source != "-" && !isURL(opts.Source) { - f, err := os.Open(opts.Source) - if err != nil { - return err - } - opts.InputStream = f - opts.Source = "-" - } - return c.createImage(queryString(&opts), nil, opts.InputStream, opts.OutputStream, opts.RawJSONStream) -} - -// BuildImageOptions present the set of informations available for building an -// image from a tarfile with a Dockerfile in it. -// -// For more details about the Docker building process, see -// http://goo.gl/tlPXPu. -type BuildImageOptions struct { - Name string `qs:"t"` - Dockerfile string `qs:"dockerfile"` - NoCache bool `qs:"nocache"` - SuppressOutput bool `qs:"q"` - Pull bool `qs:"pull"` - RmTmpContainer bool `qs:"rm"` - ForceRmTmpContainer bool `qs:"forcerm"` - Memory int64 `qs:"memory"` - Memswap int64 `qs:"memswap"` - CPUShares int64 `qs:"cpushares"` - CPUSetCPUs string `qs:"cpusetcpus"` - InputStream io.Reader `qs:"-"` - OutputStream io.Writer `qs:"-"` - RawJSONStream bool `qs:"-"` - Remote string `qs:"remote"` - Auth AuthConfiguration `qs:"-"` // for older docker X-Registry-Auth header - AuthConfigs AuthConfigurations `qs:"-"` // for newer docker X-Registry-Config header - ContextDir string `qs:"-"` -} - -// BuildImage builds an image from a tarball's url or a Dockerfile in the input -// stream. -// -// See https://goo.gl/xySxCe for more details. 
-func (c *Client) BuildImage(opts BuildImageOptions) error { - if opts.OutputStream == nil { - return ErrMissingOutputStream - } - headers, err := headersWithAuth(opts.Auth, c.versionedAuthConfigs(opts.AuthConfigs)) - if err != nil { - return err - } - - if opts.Remote != "" && opts.Name == "" { - opts.Name = opts.Remote - } - if opts.InputStream != nil || opts.ContextDir != "" { - headers["Content-Type"] = "application/tar" - } else if opts.Remote == "" { - return ErrMissingRepo - } - if opts.ContextDir != "" { - if opts.InputStream != nil { - return ErrMultipleContexts - } - var err error - if opts.InputStream, err = createTarStream(opts.ContextDir, opts.Dockerfile); err != nil { - return err - } - } - - return c.stream("POST", fmt.Sprintf("/build?%s", queryString(&opts)), streamOptions{ - setRawTerminal: true, - rawJSONStream: opts.RawJSONStream, - headers: headers, - in: opts.InputStream, - stdout: opts.OutputStream, - }) -} - -func (c *Client) versionedAuthConfigs(authConfigs AuthConfigurations) interface{} { - if c.serverAPIVersion == nil { - c.checkAPIVersion() - } - if c.serverAPIVersion != nil && c.serverAPIVersion.GreaterThanOrEqualTo(apiVersion119) { - return AuthConfigurations119(authConfigs.Configs) - } - return authConfigs -} - -// TagImageOptions present the set of options to tag an image. -// -// See https://goo.gl/98ZzkU for more details. -type TagImageOptions struct { - Repo string - Tag string - Force bool -} - -// TagImage adds a tag to the image identified by the given name. -// -// See https://goo.gl/98ZzkU for more details. -func (c *Client) TagImage(name string, opts TagImageOptions) error { - if name == "" { - return ErrNoSuchImage - } - _, status, err := c.do("POST", fmt.Sprintf("/images/"+name+"/tag?%s", - queryString(&opts)), doOptions{}) - - if status == http.StatusNotFound { - return ErrNoSuchImage - } - - return err -} - -func isURL(u string) bool { - p, err := url.Parse(u) - if err != nil { - return false - } - return p.Scheme == "http" || p.Scheme == "https" -} - -func headersWithAuth(auths ...interface{}) (map[string]string, error) { - var headers = make(map[string]string) - - for _, auth := range auths { - switch auth.(type) { - case AuthConfiguration: - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - headers["X-Registry-Auth"] = base64.URLEncoding.EncodeToString(buf.Bytes()) - case AuthConfigurations, AuthConfigurations119: - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(auth); err != nil { - return nil, err - } - headers["X-Registry-Config"] = base64.URLEncoding.EncodeToString(buf.Bytes()) - } - } - - return headers, nil -} - -// APIImageSearch reflect the result of a search on the Docker Hub. -// -// See https://goo.gl/AYjyrF for more details. -type APIImageSearch struct { - Description string `json:"description,omitempty" yaml:"description,omitempty"` - IsOfficial bool `json:"is_official,omitempty" yaml:"is_official,omitempty"` - IsAutomated bool `json:"is_automated,omitempty" yaml:"is_automated,omitempty"` - Name string `json:"name,omitempty" yaml:"name,omitempty"` - StarCount int `json:"star_count,omitempty" yaml:"star_count,omitempty"` -} - -// SearchImages search the docker hub with a specific given term. -// -// See https://goo.gl/AYjyrF for more details. 
-func (c *Client) SearchImages(term string) ([]APIImageSearch, error) { - body, _, err := c.do("GET", "/images/search?term="+term, doOptions{}) - if err != nil { - return nil, err - } - var searchResult []APIImageSearch - err = json.Unmarshal(body, &searchResult) - if err != nil { - return nil, err - } - return searchResult, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go deleted file mode 100644 index 11d45e72a..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/image_test.go +++ /dev/null @@ -1,971 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "io/ioutil" - "net" - "net/http" - "net/url" - "os" - "reflect" - "strings" - "testing" - "time" -) - -func newTestClient(rt *FakeRoundTripper) Client { - endpoint := "http://localhost:4243" - u, _ := parseEndpoint("http://localhost:4243", false) - testAPIVersion, _ := NewAPIVersion("1.17") - client := Client{ - HTTPClient: &http.Client{Transport: rt}, - Dialer: &net.Dialer{}, - endpoint: endpoint, - endpointURL: u, - SkipServerVersionCheck: true, - serverAPIVersion: testAPIVersion, - } - return client -} - -type stdoutMock struct { - *bytes.Buffer -} - -func (m stdoutMock) Close() error { - return nil -} - -type stdinMock struct { - *bytes.Buffer -} - -func (m stdinMock) Close() error { - return nil -} - -func TestListImages(t *testing.T) { - body := `[ - { - "Repository":"base", - "Tag":"ubuntu-12.10", - "Id":"b750fe79269d", - "Created":1364102658 - }, - { - "Repository":"base", - "Tag":"ubuntu-quantal", - "Id":"b750fe79269d", - "Created":1364102658 - }, - { - "RepoTag": [ - "ubuntu:12.04", - "ubuntu:precise", - "ubuntu:latest" - ], - "Id": "8dbd9e392a964c", - "Created": 1365714795, - "Size": 131506275, - "VirtualSize": 131506275 - }, - { - "RepoTag": [ - "ubuntu:12.10", - "ubuntu:quantal" - ], - "ParentId": "27cf784147099545", - "Id": "b750fe79269d2e", - "Created": 1364102658, - "Size": 24653, - "VirtualSize": 180116135 - } -]` - var expected []APIImages - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - images, err := client.ListImages(ListImagesOptions{}) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(images, expected) { - t.Errorf("ListImages: Wrong return value. Want %#v. Got %#v.", expected, images) - } -} - -func TestListImagesParameters(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "null", status: http.StatusOK} - client := newTestClient(fakeRT) - _, err := client.ListImages(ListImagesOptions{All: false}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ListImages({All: false}: Wrong HTTP method. Want GET. Got %s.", req.Method) - } - if all := req.URL.Query().Get("all"); all != "0" && all != "" { - t.Errorf("ListImages({All: false}): Wrong parameter. Want all=0 or not present at all. Got all=%s", all) - } - fakeRT.Reset() - _, err = client.ListImages(ListImagesOptions{All: true}) - if err != nil { - t.Fatal(err) - } - req = fakeRT.requests[0] - if all := req.URL.Query().Get("all"); all != "1" { - t.Errorf("ListImages({All: true}): Wrong parameter. Want all=1. 
Got all=%s", all) - } - fakeRT.Reset() - _, err = client.ListImages(ListImagesOptions{Filters: map[string][]string{ - "dangling": {"true"}, - }}) - if err != nil { - t.Fatal(err) - } - req = fakeRT.requests[0] - body := req.URL.Query().Get("filters") - var filters map[string][]string - err = json.Unmarshal([]byte(body), &filters) - if err != nil { - t.Fatal(err) - } - if len(filters["dangling"]) != 1 || filters["dangling"][0] != "true" { - t.Errorf("ListImages(dangling=[true]): Wrong filter map. Want dangling=[true], got dangling=%v", filters["dangling"]) - } -} - -func TestImageHistory(t *testing.T) { - body := `[ - { - "Id": "25daec02219d2d852f7526137213a9b199926b4b24e732eab5b8bc6c49bd470e", - "Tags": [ - "debian:7.6", - "debian:latest", - "debian:7", - "debian:wheezy" - ], - "Created": 1409856216, - "CreatedBy": "/bin/sh -c #(nop) CMD [/bin/bash]" - }, - { - "Id": "41026a5347fb5be6ed16115bf22df8569697139f246186de9ae8d4f67c335dce", - "Created": 1409856213, - "CreatedBy": "/bin/sh -c #(nop) ADD file:1ee9e97209d00e3416a4543b23574cc7259684741a46bbcbc755909b8a053a38 in /", - "Size": 85178663 - }, - { - "Id": "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158", - "Tags": [ - "scratch:latest" - ], - "Created": 1371157430 - } -]` - var expected []ImageHistory - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - history, err := client.ImageHistory("debian:latest") - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(history, expected) { - t.Errorf("ImageHistory: Wrong return value. Want %#v. Got %#v.", expected, history) - } -} - -func TestRemoveImage(t *testing.T) { - name := "test" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - err := client.RemoveImage(name) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/images/" + name)) - if req.URL.Path != u.Path { - t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } -} - -func TestRemoveImageNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) - err := client.RemoveImage("test:") - if err != ErrNoSuchImage { - t.Errorf("RemoveImage: wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) - } -} - -func TestRemoveImageExtended(t *testing.T) { - name := "test" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - err := client.RemoveImageExtended(name, RemoveImageOptions{Force: true, NoPrune: true}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveImage(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/images/" + name)) - if req.URL.Path != u.Path { - t.Errorf("RemoveImage(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } - expectedQuery := "force=1&noprune=1" - if query := req.URL.Query().Encode(); query != expectedQuery { - t.Errorf("PushImage: Wrong query string. Want %q. 
Got %q.", expectedQuery, query) - } -} - -func TestInspectImage(t *testing.T) { - body := `{ - "Id":"b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - "Parent":"27cf784147099545", - "Created":"2013-03-23T22:24:18.818426Z", - "Container":"3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - "ContainerConfig":{"Memory":1}, - "VirtualSize":12345 -}` - - created, err := time.Parse(time.RFC3339Nano, "2013-03-23T22:24:18.818426Z") - if err != nil { - t.Fatal(err) - } - - expected := Image{ - ID: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - Parent: "27cf784147099545", - Created: created, - Container: "3d67245a8d72ecf13f33dffac9f79dcdf70f75acb84d308770391510e0c23ad0", - ContainerConfig: Config{ - Memory: 1, - }, - VirtualSize: 12345, - } - fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(fakeRT) - image, err := client.InspectImage(expected.ID) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*image, expected) { - t.Errorf("InspectImage(%q): Wrong image returned. Want %#v. Got %#v.", expected.ID, expected, *image) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("InspectImage(%q): Wrong HTTP method. Want GET. Got %s.", expected.ID, req.Method) - } - u, _ := url.Parse(client.getURL("/images/" + expected.ID + "/json")) - if req.URL.Path != u.Path { - t.Errorf("InspectImage(%q): Wrong request URL. Want %q. Got %q.", expected.ID, u.Path, req.URL.Path) - } -} - -func TestInspectImageNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such image", status: http.StatusNotFound}) - name := "test" - image, err := client.InspectImage(name) - if image != nil { - t.Errorf("InspectImage(%q): expected image, got %#v.", name, image) - } - if err != ErrNoSuchImage { - t.Errorf("InspectImage(%q): wrong error. Want %#v. Got %#v.", name, ErrNoSuchImage, err) - } -} - -func TestPushImage(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - expected := "Pushing 1/100" - if buf.String() != expected { - t.Errorf("PushImage: Wrong output. Want %q. Got %q.", expected, buf.String()) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PushImage: Wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/images/test/push")) - if req.URL.Path != u.Path { - t.Errorf("PushImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } - if query := req.URL.Query().Encode(); query != "" { - t.Errorf("PushImage: Wrong query string. Want no parameters, got %q.", query) - } - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("PushImage: caught error decoding auth. %#v", err.Error()) - } - if strings.TrimSpace(string(auth)) != "{}" { - t.Errorf("PushImage: wrong body. Want %q. Got %q.", - base64.URLEncoding.EncodeToString([]byte("{}")), req.Header.Get("X-Registry-Auth")) - } -} - -func TestPushImageWithRawJSON(t *testing.T) { - body := ` - {"status":"Pushing..."} - {"status":"Pushing", "progress":"1/? 
(n/a)", "progressDetail":{"current":1}}} - {"status":"Image successfully pushed"} - ` - fakeRT := &FakeRoundTripper{ - message: body, - status: http.StatusOK, - header: map[string]string{ - "Content-Type": "application/json", - }, - } - client := newTestClient(fakeRT) - var buf bytes.Buffer - - err := client.PushImage(PushImageOptions{ - Name: "test", - OutputStream: &buf, - RawJSONStream: true, - }, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - if buf.String() != body { - t.Errorf("PushImage: Wrong raw output. Want %q. Got %q.", body, buf.String()) - } -} - -func TestPushImageWithAuthentication(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - inputAuth := AuthConfiguration{ - Username: "gopher", - Password: "gopher123", - Email: "gopher@tsuru.io", - } - err := client.PushImage(PushImageOptions{Name: "test", OutputStream: &buf}, inputAuth) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - var gotAuth AuthConfiguration - - auth, err := base64.URLEncoding.DecodeString(req.Header.Get("X-Registry-Auth")) - if err != nil { - t.Errorf("PushImage: caught error decoding auth. %#v", err.Error()) - } - - err = json.Unmarshal(auth, &gotAuth) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotAuth, inputAuth) { - t.Errorf("PushImage: wrong auth configuration. Want %#v. Got %#v.", inputAuth, gotAuth) - } -} - -func TestPushImageCustomRegistry(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pushing 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var authConfig AuthConfiguration - var buf bytes.Buffer - opts := PushImageOptions{ - Name: "test", Registry: "docker.tsuru.io", - OutputStream: &buf, - } - err := client.PushImage(opts, authConfig) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedQuery := "registry=docker.tsuru.io" - if query := req.URL.Query().Encode(); query != expectedQuery { - t.Errorf("PushImage: Wrong query string. Want %q. Got %q.", expectedQuery, query) - } -} - -func TestPushImageNoName(t *testing.T) { - client := Client{} - err := client.PushImage(PushImageOptions{}, AuthConfiguration{}) - if err != ErrNoSuchImage { - t.Errorf("PushImage: got wrong error. Want %#v. Got %#v.", ErrNoSuchImage, err) - } -} - -func TestPullImage(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PullImage(PullImageOptions{Repository: "base", OutputStream: &buf}, - AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - expected := "Pulling 1/100" - if buf.String() != expected { - t.Errorf("PullImage: Wrong output. Want %q. Got %q.", expected, buf.String()) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("PullImage: Wrong HTTP method. Want POST. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/images/create")) - if req.URL.Path != u.Path { - t.Errorf("PullImage: Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } - expectedQuery := "fromImage=base" - if query := req.URL.Query().Encode(); query != expectedQuery { - t.Errorf("PullImage: Wrong query strin. Want %q. 
Got %q.", expectedQuery, query) - } -} - -func TestPullImageWithRawJSON(t *testing.T) { - body := ` - {"status":"Pulling..."} - {"status":"Pulling", "progress":"1 B/ 100 B", "progressDetail":{"current":1, "total":100}} - ` - fakeRT := &FakeRoundTripper{ - message: body, - status: http.StatusOK, - header: map[string]string{ - "Content-Type": "application/json", - }, - } - client := newTestClient(fakeRT) - var buf bytes.Buffer - err := client.PullImage(PullImageOptions{ - Repository: "base", - OutputStream: &buf, - RawJSONStream: true, - }, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - if buf.String() != body { - t.Errorf("PullImage: Wrong raw output. Want %q. Got %q", body, buf.String()) - } -} - -func TestPullImageWithoutOutputStream(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := PullImageOptions{ - Repository: "base", - Registry: "docker.tsuru.io", - } - err := client.PullImage(opts, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestPullImageCustomRegistry(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := PullImageOptions{ - Repository: "base", - Registry: "docker.tsuru.io", - OutputStream: &buf, - } - err := client.PullImage(opts, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestPullImageTag(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "Pulling 1/100", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := PullImageOptions{ - Repository: "base", - Registry: "docker.tsuru.io", - Tag: "latest", - OutputStream: &buf, - } - err := client.PullImage(opts, AuthConfiguration{}) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromImage": {"base"}, "registry": {"docker.tsuru.io"}, "tag": {"latest"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("PullImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestPullImageNoRepository(t *testing.T) { - var opts PullImageOptions - client := Client{} - err := client.PullImage(opts, AuthConfiguration{}) - if err != ErrNoSuchImage { - t.Errorf("PullImage: got wrong error. Want %#v. 
Got %#v.", ErrNoSuchImage, err) - } -} - -func TestImportImageFromUrl(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := ImportImageOptions{ - Source: "http://mycompany.com/file.tar", - Repository: "testimage", - Tag: "tag", - OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestImportImageFromInput(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - in := bytes.NewBufferString("tar content") - var buf bytes.Buffer - opts := ImportImageOptions{ - Source: "-", Repository: "testimage", - InputStream: in, OutputStream: &buf, - Tag: "tag", - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}, "tag": {opts.Tag}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) - } - body, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) - } - e := "tar content" - if string(body) != e { - t.Errorf("ImportImage: wrong body. Want %#v. Got %#v.", e, string(body)) - } -} - -func TestImportImageDoesNotPassesInputIfSourceIsNotDash(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - in := bytes.NewBufferString("foo") - opts := ImportImageOptions{ - Source: "http://test.com/container.tar", Repository: "testimage", - InputStream: in, OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {opts.Source}, "repo": {opts.Repository}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) - } - body, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Errorf("ImportImage: caugth error while reading body %#v", err.Error()) - } - if string(body) != "" { - t.Errorf("ImportImage: wrong body. Want nothing. Got %#v.", string(body)) - } -} - -func TestImportImageShouldPassTarContentToBodyWhenSourceIsFilePath(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - tarPath := "testing/data/container.tar" - opts := ImportImageOptions{ - Source: tarPath, Repository: "testimage", - OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - tar, err := os.Open(tarPath) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - tarContent, err := ioutil.ReadAll(tar) - body, err := ioutil.ReadAll(req.Body) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(tarContent, body) { - t.Errorf("ImportImage: wrong body. Want %#v content. 
Got %#v.", tarPath, body) - } -} - -func TestImportImageShouldChangeSourceToDashWhenItsAFilePath(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - tarPath := "testing/data/container.tar" - opts := ImportImageOptions{ - Source: tarPath, Repository: "testimage", - OutputStream: &buf, - } - err := client.ImportImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"fromSrc": {"-"}, "repo": {opts.Repository}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("ImportImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestBuildImageParameters(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - NoCache: true, - SuppressOutput: true, - Pull: true, - RmTmpContainer: true, - ForceRmTmpContainer: true, - Memory: 1024, - Memswap: 2048, - CPUShares: 10, - CPUSetCPUs: "0-3", - InputStream: &buf, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != nil && strings.Index(err.Error(), "build image fail") == -1 { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{ - "t": {opts.Name}, - "nocache": {"1"}, - "q": {"1"}, - "pull": {"1"}, - "rm": {"1"}, - "forcerm": {"1"}, - "memory": {"1024"}, - "memswap": {"2048"}, - "cpushares": {"10"}, - "cpusetcpus": {"0-3"}, - } - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestBuildImageParametersForRemoteBuild(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - Remote: "testing/data/container.tar", - SuppressOutput: true, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"t": {opts.Name}, "remote": {opts.Remote}, "q": {"1"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestBuildImageMissingRepoAndNilInput(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - SuppressOutput: true, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != ErrMissingRepo { - t.Errorf("BuildImage: wrong error returned. Want %#v. Got %#v.", ErrMissingRepo, err) - } -} - -func TestBuildImageMissingOutputStream(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := BuildImageOptions{Name: "testImage"} - err := client.BuildImage(opts) - if err != ErrMissingOutputStream { - t.Errorf("BuildImage: wrong error returned. Want %#v. 
Got %#v.", ErrMissingOutputStream, err) - } -} - -func TestBuildImageWithRawJSON(t *testing.T) { - body := ` - {"stream":"Step 0 : FROM ubuntu:latest\n"} - {"stream":" ---\u003e 4300eb9d3c8d\n"} - {"stream":"Step 1 : MAINTAINER docker \n"} - {"stream":" ---\u003e Using cache\n"} - {"stream":" ---\u003e 3a3ed758c370\n"} - {"stream":"Step 2 : CMD /usr/bin/top\n"} - {"stream":" ---\u003e Running in 36b1479cc2e4\n"} - {"stream":" ---\u003e 4b6188aebe39\n"} - {"stream":"Removing intermediate container 36b1479cc2e4\n"} - {"stream":"Successfully built 4b6188aebe39\n"} - ` - fakeRT := &FakeRoundTripper{ - message: body, - status: http.StatusOK, - header: map[string]string{ - "Content-Type": "application/json", - }, - } - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Name: "testImage", - RmTmpContainer: true, - InputStream: &buf, - OutputStream: &buf, - RawJSONStream: true, - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - if buf.String() != body { - t.Errorf("BuildImage: Wrong raw output. Want %q. Got %q.", body, buf.String()) - } -} - -func TestBuildImageRemoteWithoutName(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - var buf bytes.Buffer - opts := BuildImageOptions{ - Remote: "testing/data/container.tar", - SuppressOutput: true, - OutputStream: &buf, - } - err := client.BuildImage(opts) - if err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := map[string][]string{"t": {opts.Remote}, "remote": {opts.Remote}, "q": {"1"}} - got := map[string][]string(req.URL.Query()) - if !reflect.DeepEqual(got, expected) { - t.Errorf("BuildImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestTagImageParameters(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := TagImageOptions{Repo: "testImage"} - err := client.TagImage("base", opts) - if err != nil && strings.Index(err.Error(), "tag image fail") == -1 { - t.Fatal(err) - } - req := fakeRT.requests[0] - expected := "http://localhost:4243/images/base/tag?repo=testImage" - got := req.URL.String() - if !reflect.DeepEqual(got, expected) { - t.Errorf("TagImage: wrong query string. Want %#v. Got %#v.", expected, got) - } -} - -func TestTagImageMissingRepo(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := TagImageOptions{Repo: "testImage"} - err := client.TagImage("", opts) - if err != ErrNoSuchImage { - t.Errorf("TestTag: wrong error returned. Want %#v. Got %#v.", - ErrNoSuchImage, err) - } -} - -func TestIsUrl(t *testing.T) { - url := "http://foo.bar/" - result := isURL(url) - if !result { - t.Errorf("isURL: wrong match. Expected %#v to be a url. Got %#v.", url, result) - } - url = "/foo/bar.tar" - result = isURL(url) - if result { - t.Errorf("isURL: wrong match. Expected %#v to not be a url. Got %#v", url, result) - } -} - -func TestLoadImage(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - tar, err := os.Open("testing/data/container.tar") - if err != nil { - t.Fatal(err) - } else { - defer tar.Close() - } - opts := LoadImageOptions{InputStream: tar} - err = client.LoadImage(opts) - if nil != err { - t.Error(err) - } - req := fakeRT.requests[0] - if req.Method != "POST" { - t.Errorf("LoadImage: wrong method. Expected %q. 
Got %q.", "POST", req.Method) - } - if req.URL.Path != "/images/load" { - t.Errorf("LoadImage: wrong URL. Expected %q. Got %q.", "/images/load", req.URL.Path) - } -} - -func TestExportImage(t *testing.T) { - var buf bytes.Buffer - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := ExportImageOptions{Name: "testimage", OutputStream: &buf} - err := client.ExportImage(opts) - if nil != err { - t.Error(err) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method) - } - expectedPath := "/images/testimage/get" - if req.URL.Path != expectedPath { - t.Errorf("ExportIMage: wrong path. Expected %q. Got %q.", expectedPath, req.URL.Path) - } -} - -func TestExportImages(t *testing.T) { - var buf bytes.Buffer - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := ExportImagesOptions{Names: []string{"testimage1", "testimage2:latest"}, OutputStream: &buf} - err := client.ExportImages(opts) - if nil != err { - t.Error(err) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("ExportImage: wrong method. Expected %q. Got %q.", "GET", req.Method) - } - expected := "http://localhost:4243/images/get?names=testimage1&names=testimage2%3Alatest" - got := req.URL.String() - if !reflect.DeepEqual(got, expected) { - t.Errorf("ExportIMage: wrong path. Expected %q. Got %q.", expected, got) - } -} - -func TestExportImagesNoNames(t *testing.T) { - var buf bytes.Buffer - fakeRT := &FakeRoundTripper{message: "", status: http.StatusOK} - client := newTestClient(fakeRT) - opts := ExportImagesOptions{Names: []string{}, OutputStream: &buf} - err := client.ExportImages(opts) - if err == nil { - t.Error("Expected an error") - } - if err != ErrMustSpecifyNames { - t.Error(err) - } -} - -func TestSearchImages(t *testing.T) { - body := `[ - { - "description":"A container with Cassandra 2.0.3", - "is_official":true, - "is_automated":true, - "name":"poklet/cassandra", - "star_count":17 - }, - { - "description":"A container with Cassandra 2.0.3", - "is_official":true, - "is_automated":false, - "name":"poklet/cassandra", - "star_count":17 - } - , - { - "description":"A container with Cassandra 2.0.3", - "is_official":false, - "is_automated":true, - "name":"poklet/cassandra", - "star_count":17 - } -]` - var expected []APIImageSearch - err := json.Unmarshal([]byte(body), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - result, err := client.SearchImages("cassandra") - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(result, expected) { - t.Errorf("SearchImages: Wrong return value. Want %#v. Got %#v.", expected, result) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go deleted file mode 100644 index df22cf494..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "bytes" - "strings" -) - -// Version returns version information about the docker server. -// -// See https://goo.gl/ND9R8L for more details. 
-func (c *Client) Version() (*Env, error) { - body, _, err := c.do("GET", "/version", doOptions{}) - if err != nil { - return nil, err - } - var env Env - if err := env.Decode(bytes.NewReader(body)); err != nil { - return nil, err - } - return &env, nil -} - -// Info returns system-wide information about the Docker server. -// -// See https://goo.gl/ElTHi2 for more details. -func (c *Client) Info() (*Env, error) { - body, _, err := c.do("GET", "/info", doOptions{}) - if err != nil { - return nil, err - } - var info Env - err = info.Decode(bytes.NewReader(body)) - if err != nil { - return nil, err - } - return &info, nil -} - -// ParseRepositoryTag gets the name of the repository and returns it splitted -// in two parts: the repository and the tag. -// -// Some examples: -// -// localhost.localdomain:5000/samalba/hipache:latest -> localhost.localdomain:5000/samalba/hipache, latest -// localhost.localdomain:5000/samalba/hipache -> localhost.localdomain:5000/samalba/hipache, "" -func ParseRepositoryTag(repoTag string) (repository string, tag string) { - n := strings.LastIndex(repoTag, ":") - if n < 0 { - return repoTag, "" - } - if tag := repoTag[n+1:]; !strings.Contains(tag, "/") { - return repoTag[:n], tag - } - return repoTag, "" -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go deleted file mode 100644 index ceaf076ed..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/misc_test.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "net/http" - "net/url" - "reflect" - "sort" - "testing" -) - -type DockerVersion struct { - Version string - GitCommit string - GoVersion string -} - -func TestVersion(t *testing.T) { - body := `{ - "Version":"0.2.2", - "GitCommit":"5a2a5cc+CHANGES", - "GoVersion":"go1.0.3" -}` - fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(&fakeRT) - expected := DockerVersion{ - Version: "0.2.2", - GitCommit: "5a2a5cc+CHANGES", - GoVersion: "go1.0.3", - } - version, err := client.Version() - if err != nil { - t.Fatal(err) - } - - if result := version.Get("Version"); result != expected.Version { - t.Errorf("Version(): Wrong result. Want %#v. Got %#v.", expected.Version, version.Get("Version")) - } - if result := version.Get("GitCommit"); result != expected.GitCommit { - t.Errorf("GitCommit(): Wrong result. Want %#v. Got %#v.", expected.GitCommit, version.Get("GitCommit")) - } - if result := version.Get("GoVersion"); result != expected.GoVersion { - t.Errorf("GoVersion(): Wrong result. Want %#v. Got %#v.", expected.GoVersion, version.Get("GoVersion")) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("Version(): wrong request method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/version")) - if req.URL.Path != u.Path { - t.Errorf("Version(): wrong request path. Want %q. 
Got %q.", u.Path, req.URL.Path) - } -} - -func TestVersionError(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} - client := newTestClient(fakeRT) - version, err := client.Version() - if version != nil { - t.Errorf("Version(): expected value, got %#v.", version) - } - if err == nil { - t.Error("Version(): unexpected error") - } -} - -func TestInfo(t *testing.T) { - body := `{ - "Containers":11, - "Images":16, - "Debug":0, - "NFd":11, - "NGoroutines":21, - "MemoryLimit":1, - "SwapLimit":0 -}` - fakeRT := FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(&fakeRT) - expected := Env{} - expected.SetInt("Containers", 11) - expected.SetInt("Images", 16) - expected.SetBool("Debug", false) - expected.SetInt("NFd", 11) - expected.SetInt("NGoroutines", 21) - expected.SetBool("MemoryLimit", true) - expected.SetBool("SwapLimit", false) - info, err := client.Info() - if err != nil { - t.Fatal(err) - } - infoSlice := []string(*info) - expectedSlice := []string(expected) - sort.Strings(infoSlice) - sort.Strings(expectedSlice) - if !reflect.DeepEqual(expectedSlice, infoSlice) { - t.Errorf("Info(): Wrong result.\nWant %#v.\nGot %#v.", expected, *info) - } - req := fakeRT.requests[0] - if req.Method != "GET" { - t.Errorf("Info(): Wrong HTTP method. Want GET. Got %s.", req.Method) - } - u, _ := url.Parse(client.getURL("/info")) - if req.URL.Path != u.Path { - t.Errorf("Info(): Wrong request path. Want %q. Got %q.", u.Path, req.URL.Path) - } -} - -func TestInfoError(t *testing.T) { - fakeRT := &FakeRoundTripper{message: "internal error", status: http.StatusInternalServerError} - client := newTestClient(fakeRT) - version, err := client.Info() - if version != nil { - t.Errorf("Info(): expected value, got %#v.", version) - } - if err == nil { - t.Error("Info(): unexpected error") - } -} - -func TestParseRepositoryTag(t *testing.T) { - var tests = []struct { - input string - expectedRepo string - expectedTag string - }{ - { - "localhost.localdomain:5000/samalba/hipache:latest", - "localhost.localdomain:5000/samalba/hipache", - "latest", - }, - { - "localhost.localdomain:5000/samalba/hipache", - "localhost.localdomain:5000/samalba/hipache", - "", - }, - { - "tsuru/python", - "tsuru/python", - "", - }, - { - "tsuru/python:2.7", - "tsuru/python", - "2.7", - }, - } - for _, tt := range tests { - repo, tag := ParseRepositoryTag(tt.input) - if repo != tt.expectedRepo { - t.Errorf("ParseRepositoryTag(%q): wrong repository. Want %q. Got %q", tt.input, tt.expectedRepo, repo) - } - if tag != tt.expectedTag { - t.Errorf("ParseRepositoryTag(%q): wrong tag. Want %q. Got %q", tt.input, tt.expectedTag, tag) - } - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go deleted file mode 100644 index 0d3e2d43f..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" -) - -// ErrNetworkAlreadyExists is the error returned by CreateNetwork when the -// network already exists. -var ErrNetworkAlreadyExists = errors.New("network already exists") - -// Network represents a network. -// -// See https://goo.gl/FDkCdQ for more details. 
-type Network struct { - Name string `json:"name"` - ID string `json:"id"` - Type string `json:"type"` - Endpoints []*Endpoint `json:"endpoints"` -} - -// Endpoint represents an endpoint. -// -// See https://goo.gl/FDkCdQ for more details. -type Endpoint struct { - Name string `json:"name"` - ID string `json:"id"` - Network string `json:"network"` -} - -// ListNetworks returns all networks. -// -// See https://goo.gl/4hCNtZ for more details. -func (c *Client) ListNetworks() ([]Network, error) { - body, _, err := c.do("GET", "/networks", doOptions{}) - if err != nil { - return nil, err - } - var networks []Network - if err := json.Unmarshal(body, &networks); err != nil { - return nil, err - } - return networks, nil -} - -// NetworkInfo returns information about a network by its ID. -// -// See https://goo.gl/4hCNtZ for more details. -func (c *Client) NetworkInfo(id string) (*Network, error) { - path := "/networks/" + id - body, status, err := c.do("GET", path, doOptions{}) - if status == http.StatusNotFound { - return nil, &NoSuchNetwork{ID: id} - } - if err != nil { - return nil, err - } - var network Network - if err := json.Unmarshal(body, &network); err != nil { - return nil, err - } - return &network, nil -} - -// CreateNetworkOptions specify parameters to the CreateNetwork function and -// (for now) is the expected body of the "create network" http request message -// -// See https://goo.gl/FDkCdQ for more details. -type CreateNetworkOptions struct { - Name string `json:"name"` - NetworkType string `json:"network_type"` - Options map[string]interface{} `json:"options"` -} - -// CreateNetwork creates a new network, returning the network instance, -// or an error in case of failure. -// -// See http://goo.gl/mErxNp for more details. -func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { - body, status, err := c.do( - "POST", - "/networks", - doOptions{ - data: opts, - }, - ) - - if status == http.StatusConflict { - return nil, ErrNetworkAlreadyExists - } - if err != nil { - return nil, err - } - - type createNetworkResponse struct { - ID string - } - var ( - network Network - resp createNetworkResponse - ) - err = json.Unmarshal(body, &resp) - if err != nil { - return nil, err - } - - network.Name = opts.Name - network.ID = resp.ID - network.Type = opts.NetworkType - - return &network, nil -} - -// NoSuchNetwork is the error returned when a given network does not exist. -type NoSuchNetwork struct { - ID string -} - -func (err *NoSuchNetwork) Error() string { - return fmt.Sprintf("No such network: %s", err.ID) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network_test.go deleted file mode 100644 index 970988cf4..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/network_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
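For the network API above, a short sketch that creates a network and reads it back with NetworkInfo; the network name and driver are placeholders and NewClient is assumed from the main package:

	package main

	import (
		"fmt"
		"log"

		docker "github.com/fsouza/go-dockerclient"
	)

	func main() {
		client, err := docker.NewClient("unix:///var/run/docker.sock") // assumed constructor
		if err != nil {
			log.Fatal(err)
		}
		created, err := client.CreateNetwork(docker.CreateNetworkOptions{
			Name:        "example-net", // placeholder name
			NetworkType: "bridge",
		})
		if err != nil {
			log.Fatal(err) // ErrNetworkAlreadyExists when the daemon answers 409
		}
		network, err := client.NetworkInfo(created.ID)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(network.Name, network.Type)
	}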
- -package docker - -import ( - "encoding/json" - "net/http" - "net/url" - "reflect" - "testing" -) - -func TestListNetworks(t *testing.T) { - jsonNetworks := `[ - { - "ID": "8dfafdbc3a40", - "Name": "blah", - "Type": "bridge", - "Endpoints":[{"ID": "918c11c8288a", "Name": "dsafdsaf", "Network": "8dfafdbc3a40"}] - }, - { - "ID": "9fb1e39c", - "Name": "foo", - "Type": "bridge", - "Endpoints":[{"ID": "c080be979dda", "Name": "lllll2222", "Network": "9fb1e39c"}] - } -]` - var expected []Network - err := json.Unmarshal([]byte(jsonNetworks), &expected) - if err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: jsonNetworks, status: http.StatusOK}) - containers, err := client.ListNetworks() - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(containers, expected) { - t.Errorf("ListNetworks: Expected %#v. Got %#v.", expected, containers) - } -} - -func TestNetworkInfo(t *testing.T) { - jsonNetwork := `{ - "ID": "8dfafdbc3a40", - "Name": "blah", - "Type": "bridge", - "Endpoints":[{"ID": "918c11c8288a", "Name": "dsafdsaf", "Network": "8dfafdbc3a40"}] - }` - var expected Network - err := json.Unmarshal([]byte(jsonNetwork), &expected) - if err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: jsonNetwork, status: http.StatusOK} - client := newTestClient(fakeRT) - id := "8dfafdbc3a40" - network, err := client.NetworkInfo(id) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(*network, expected) { - t.Errorf("NetworkInfo(%q): Expected %#v. Got %#v.", id, expected, network) - } - expectedURL, _ := url.Parse(client.getURL("/networks/8dfafdbc3a40")) - if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path { - t.Errorf("NetworkInfo(%q): Wrong path in request. Want %q. Got %q.", id, expectedURL.Path, gotPath) - } -} - -func TestNetworkCreate(t *testing.T) { - jsonID := `{"ID": "8dfafdbc3a40"}` - jsonNetwork := `{ - "ID": "8dfafdbc3a40", - "Name": "foobar", - "Type": "bridge" - }` - var expected Network - err := json.Unmarshal([]byte(jsonNetwork), &expected) - if err != nil { - t.Fatal(err) - } - - client := newTestClient(&FakeRoundTripper{message: jsonID, status: http.StatusOK}) - opts := CreateNetworkOptions{"foobar", "bridge", nil} - network, err := client.CreateNetwork(opts) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(*network, expected) { - t.Errorf("CreateNetwork: Expected %#v. Got %#v.", expected, network) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go deleted file mode 100644 index 16aa00388..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/signal.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -// Signal represents a signal that can be send to the container on -// KillContainer call. -type Signal int - -// These values represent all signals available on Linux, where containers will -// be running. 
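A hedged sketch of sending one of the Signal constants that follow to a running container; KillContainer and KillContainerOptions are assumed to be defined elsewhere in the client (they are not part of this file), and the container ID is a placeholder:

	package main

	import (
		"log"

		docker "github.com/fsouza/go-dockerclient"
	)

	func main() {
		client, err := docker.NewClient("unix:///var/run/docker.sock") // assumed constructor
		if err != nil {
			log.Fatal(err)
		}
		// KillContainerOptions is assumed to take the container ID and a Signal value.
		err = client.KillContainer(docker.KillContainerOptions{
			ID:     "b750fe79269d", // placeholder container ID
			Signal: docker.SIGTERM,
		})
		if err != nil {
			log.Fatal(err)
		}
	}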
-const ( - SIGABRT = Signal(0x6) - SIGALRM = Signal(0xe) - SIGBUS = Signal(0x7) - SIGCHLD = Signal(0x11) - SIGCLD = Signal(0x11) - SIGCONT = Signal(0x12) - SIGFPE = Signal(0x8) - SIGHUP = Signal(0x1) - SIGILL = Signal(0x4) - SIGINT = Signal(0x2) - SIGIO = Signal(0x1d) - SIGIOT = Signal(0x6) - SIGKILL = Signal(0x9) - SIGPIPE = Signal(0xd) - SIGPOLL = Signal(0x1d) - SIGPROF = Signal(0x1b) - SIGPWR = Signal(0x1e) - SIGQUIT = Signal(0x3) - SIGSEGV = Signal(0xb) - SIGSTKFLT = Signal(0x10) - SIGSTOP = Signal(0x13) - SIGSYS = Signal(0x1f) - SIGTERM = Signal(0xf) - SIGTRAP = Signal(0x5) - SIGTSTP = Signal(0x14) - SIGTTIN = Signal(0x15) - SIGTTOU = Signal(0x16) - SIGUNUSED = Signal(0x1f) - SIGURG = Signal(0x17) - SIGUSR1 = Signal(0xa) - SIGUSR2 = Signal(0xc) - SIGVTALRM = Signal(0x1a) - SIGWINCH = Signal(0x1c) - SIGXCPU = Signal(0x18) - SIGXFSZ = Signal(0x19) -) diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go deleted file mode 100644 index 48042cbda..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils" -) - -func createTarStream(srcPath, dockerfilePath string) (io.ReadCloser, error) { - excludes, err := parseDockerignore(srcPath) - if err != nil { - return nil, err - } - - includes := []string{"."} - - // If .dockerignore mentions .dockerignore or the Dockerfile - // then make sure we send both files over to the daemon - // because Dockerfile is, obviously, needed no matter what, and - // .dockerignore is needed to know if either one needs to be - // removed. The deamon will remove them for us, if needed, after it - // parses the Dockerfile. - // - // https://github.com/docker/docker/issues/8330 - // - forceIncludeFiles := []string{".dockerignore", dockerfilePath} - - for _, includeFile := range forceIncludeFiles { - if includeFile == "" { - continue - } - keepThem, err := fileutils.Matches(includeFile, excludes) - if err != nil { - return nil, fmt.Errorf("cannot match .dockerfile: '%s', error: %s", includeFile, err) - } - if keepThem { - includes = append(includes, includeFile) - } - } - - if err := validateContextDirectory(srcPath, excludes); err != nil { - return nil, err - } - tarOpts := &archive.TarOptions{ - ExcludePatterns: excludes, - IncludeFiles: includes, - Compression: archive.Uncompressed, - NoLchown: true, - } - return archive.TarWithOptions(srcPath, tarOpts) -} - -// validateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read. 
-// Symlinks which point to non-existing files don't trigger an error -func validateContextDirectory(srcPath string, excludes []string) error { - return filepath.Walk(filepath.Join(srcPath, "."), func(filePath string, f os.FileInfo, err error) error { - // skip this directory/file if it's not in the path, it won't get added to the context - if relFilePath, err := filepath.Rel(srcPath, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hanging on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -func parseDockerignore(root string) ([]string, error) { - var excludes []string - ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore")) - if err != nil && !os.IsNotExist(err) { - return excludes, fmt.Errorf("error reading .dockerignore: '%s'", err) - } - excludes = strings.Split(string(ignore), "\n") - - return excludes, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore deleted file mode 100644 index 027e8c20e..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/.dockerignore +++ /dev/null @@ -1,3 +0,0 @@ -container.tar -dockerfile.tar -foofile diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile deleted file mode 100644 index 0948dcfa8..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -# this file describes how to build tsuru python image -# to run it: -# 1- install docker -# 2- run: $ docker build -t tsuru/python https://raw.github.com/tsuru/basebuilder/master/python/Dockerfile - -from base:ubuntu-quantal -run apt-get install wget -y --force-yes -run wget http://github.com/tsuru/basebuilder/tarball/master -O basebuilder.tar.gz --no-check-certificate -run mkdir /var/lib/tsuru -run tar -xvf basebuilder.tar.gz -C /var/lib/tsuru --strip 1 -run cp /var/lib/tsuru/python/deploy /var/lib/tsuru -run cp /var/lib/tsuru/base/restart /var/lib/tsuru -run cp /var/lib/tsuru/base/start /var/lib/tsuru -run /var/lib/tsuru/base/install -run /var/lib/tsuru/base/setup diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/barfile deleted file mode 100644 index e69de29bb..000000000 diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem deleted file mode 100644 index 8e38bba13..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/ca.pem +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN CERTIFICATE----- 
-MIIC1TCCAb+gAwIBAgIQJ9MsNxrUxumNbAytGi3GEDALBgkqhkiG9w0BAQswFjEU -MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTM4WhcNMTcwOTMwMjAy -MTM4WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBALpFCSARjG+5yXoqr7UMzuE0df7RRZfeRZI06lJ02ZqV4Iii -rgL7ML9yPxX50NbLnjiilSDTUhnyocYFItokzUzz8qpX/nlYhuN2Iqwh4d0aWS8z -f5y248F+H1z+HY2W8NPl/6DVlVwYaNW1/k+RPMlHS0INLR6j+3Ievew7RNE0NnM2 -znELW6NetekDt3GUcz0Z95vDUDfdPnIk1eIFMmYvLxZh23xOca4Q37a3S8F3d+dN -+OOpwjdgY9Qme0NQUaXpgp58jWuQfB8q7mZrdnLlLqRa8gx1HeDSotX7UmWtWPkb -vd9EdlKLYw5PVpxMV1rkwf2t4TdgD5NfkpXlXkkCAwEAAaMjMCEwDgYDVR0PAQH/ -BAQDAgCkMA8GA1UdEwEB/wQFMAMBAf8wCwYJKoZIhvcNAQELA4IBAQBxYjHVSKqE -MJw7CW0GddesULtXXVWGJuZdWJLQlPvPMfIfjIvlcZyS4cdVNiQ3sREFIZz8TpII -CT0/Pg3sgv/FcOQe1CN0xZYZcyiAZHK1z0fJQq2qVpdv7+tJcjI2vvU6NI24iQCo -W1wz25trJz9QbdB2MRLMjyz7TSWuafztIvcfEzaIdQ0Whqund/cSuPGQx5IwF83F -rvlkOyJSH2+VIEBTCIuykJeL0DLTt8cePBQR5L1ISXb4RUMK9ZtqRscBRv8sn7o2 -ixG3wtL0gYF4xLtsQWVxI3iFVrU3WzOH/3c5shVRkWBd+AQRSwCJI4mKH7penJCF -i3/zzlkvOnjV ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem deleted file mode 100644 index 5e7244b24..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/cert.pem +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIC6DCCAdKgAwIBAgIRANO6ymxQAjp66KmEka1G6b0wCwYJKoZIhvcNAQELMBYx -FDASBgNVBAoTC0Jvb3QyRG9ja2VyMB4XDTE0MTAxNjIwMjE1MloXDTE3MDkzMDIw -MjE1MlowFjEUMBIGA1UEChMLQm9vdDJEb2NrZXIwggEiMA0GCSqGSIb3DQEBAQUA -A4IBDwAwggEKAoIBAQDGA1mAhSOpZspD1dpZ7qVEQrIJw4Xo8252jHaORnEdDiFm -b6brEmr6jw8t4P3IGxbqBc/TqRV+SSXxwYEVvfpeQKH+SmqStoMNtD3Ura161az4 -V0BcxMtSlsUGpoz+//QCAq8qiaxMwgiyc5253mkQm88anj2cNt7xbewiu/KFWuf7 -BVpNK1+ltpJmlukfcj/G+I1bw7j1KxBjDrFqe5cyDuuZcDL2tmUXP/ZWDyXwSv+H -AOckqn44z6aXlBkVvOXDBZJqY76d/vWVDNCuZeXRnqlhP3t1kH4V0RQXo+JD2tgt -JgdU0unzyoFOSWNUBPm73tqmjUGGAmGHBmeegJr/AgMBAAGjNTAzMA4GA1UdDwEB -/wQEAwIAgDATBgNVHSUEDDAKBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMAsGCSqG -SIb3DQEBCwOCAQEABVTWl5SmBP+j5He5bQsgnIXjviSKqe40/10V4LJAOmilycRF -zLrzM+YMwfjg6PLIs8CldAMWHw9y9ktZY4MxkgCktaiaN/QmMTMwFWEcN4wy5IpM -U5l93eAg7xsnY430h3QBBADujX4wdF3fs8rSL8zAAQFL0ihurwU124K3yXKsrwpb -CiVUGfIN4sPwjy8Ws9oxHFDC9/P8lgjHZ1nBIf8KSHnMzlxDGj7isQfhtH+7mcCL -cM1qO2NirS2v7uaEPPY+MJstAz+W7EJCW9dfMSmHna2SDC37Xkin7uEY9z+qaKFL -8d/XxOB/L8Ucy8VZhdsv0dsBq5KfJntITM0ksQ== ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/container.tar deleted file mode 100644 index e4b066e3b6df8cb78ac445a34234f3780d164cf4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2048 zcmeH_Q3``F42FH)DgF~kTC`qZ7s*`9%A^%r$Bu89Fp<6NMew1akmheFe?H>)Y5N#5 z`(UT)m>?q4G^iwZ#(XmAwH8Ujv`|_rQd)Ig3sQ!(szArs+5bAH%#&Di1HU}iJx_zp z+3uU9k~Zgl)J<3?S%)LS_Hgc7e)t4AX&%Rz>>WAcX2Ec>82D}md=O1Y)p%bo=N_rJ OD+CIGLZA@%gTMmt=q{T8 diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/dockerfile.tar deleted file mode 100644 index 32c9ce64704835cd096b85ac44c35b5087b5ccdd..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2560 zcmeHGy>8<$49;3V1%d0TNOs}`$a>xT46-c8LTt+?QB8ACf0XPiQll-h0~9$I?_v`_`p)qp;@ z0OJK)JAmosQD=m*-~y?5ASGvD1{zS;L7n!AYz2z}2Y8%Kb25fgK0fDb5l4UE+{yF$ 
zXs`{{TG^hbn!J);Cl1>2UV0=k!T8hL+GbhfZ2u5L51|SJ2KFb&fyiW3|3Qw(jvC+i zouk4oz*u9Q((Iyric9uLhPZsmgZ8ANMrS_2p5cn+n!M}dU&=mMrdq8|OlgOvF-oFN zh5A!%9Pk(EcxS4q(c~Z~u-BL7!+gIN2&&-GnGy1YRpY|{e@?X?J9}9;KY_$PxYO}H o;5QJT#=q||{Y*ZuNn-Gk-)jtGb|Y`+PV+v2`vmS2xaA4_1I+dVl>h($ diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/foofile deleted file mode 100644 index e69de29bb..000000000 diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem deleted file mode 100644 index a9346bcf4..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAxgNZgIUjqWbKQ9XaWe6lREKyCcOF6PNudox2jkZxHQ4hZm+m -6xJq+o8PLeD9yBsW6gXP06kVfkkl8cGBFb36XkCh/kpqkraDDbQ91K2tetWs+FdA -XMTLUpbFBqaM/v/0AgKvKomsTMIIsnOdud5pEJvPGp49nDbe8W3sIrvyhVrn+wVa -TStfpbaSZpbpH3I/xviNW8O49SsQYw6xanuXMg7rmXAy9rZlFz/2Vg8l8Er/hwDn -JKp+OM+ml5QZFbzlwwWSamO+nf71lQzQrmXl0Z6pYT97dZB+FdEUF6PiQ9rYLSYH -VNLp88qBTkljVAT5u97apo1BhgJhhwZnnoCa/wIDAQABAoIBAQCaGy9EC9pmU95l -DwGh7k5nIrUnTilg1FwLHWSDdCVCZKXv8ENrPelOWZqJrUo1u4eI2L8XTsewgkNq -tJu/DRzWz9yDaO0qg6rZNobMh+K076lvmZA44twOydJLS8H+D7ua+PXU2FLlZjmY -kMyXRJZmW6zCXZc7haTbJx6ZJccoquk/DkS4FcFurJP177u1YrWS9TTw9kensUtU -jQ63uf56UTN1i+0+Rxl7OW1TZlqwlri5I4njg5249+FxwwHzIq8+l7zD7K9pl8c/ -nG1HuulvU2bVlDlRdyslMPAH34vw9Sku1BD8furrJLr1na5lRSLKJODEaIPEsLwv -CdEUwP9JAoGBAO76ZW80RyNB2fA+wbTq70Sr8CwrXxYemXrez5LKDC7SsohKFCPE -IedpO/n+nmymiiJvMm874EExoG6BVrbkWkeb+2vinEfOQNlDMsDx7WLjPekP3t6i -rXHO3CjFooVFq2z3mZa/Nc5NZqu8fNWNCKJxZDJphdoj6sORNJIUvZVjAoGBANQd -++J+ITcu3/+A6JrGcgLunBFQYPqkiItk0J4QKYKuX5ik9rWcQDN8TTtfW2mDuiQ4 -NrCwuVPq1V1kB16JzH017SsYLo9g8I20YjnBZge9pKTeUaLVTb3C50LW8FBylop0 -Bnm597dNbtSjphjoTMg0XyC19o3Esf2YeWG0QNS1AoGAWWDfFRNJU99qIldmXULM -0DM6NVrXSk+ReYnhunXEzrJQwXZrR+EwCPurydk36Uz0NuK9yypquhdUeF/5TZfk -SAoHo5byekyipl9imRUigqyY2BTudvgCxKDoaHtaSFwBPFTyZZYICquaLbrmOXxw -8UhVgCFFRYvPXuts7QHC0h8CgYBWEvy9gfU0kV7wLX02IUTuj6jhFb7ktpN6DSTi -nyhZES1VoctDEu6ydcRZTW6ouH12aSE4Pd5WgTqntQmQgVZrkNB25k8ue2Xh+srJ -KQOgLIJ9LIHwE6KCWG7DnrjRzE3uTPq7to0g4tkQjH/AJ7PQof/gJDayfJjFkXPg -A+cy6QKBgEPbKpiqscm03gT2QanBut5pg4dqPOxp0SlErA3kSFNTRK3oYBQPC+LH -qA5nD5brdkeNBB58Rll8Zpzxiff50bcvLP/7/Sb3NjaXFTEY0gVbdRof3n6N0YP3 -Hu5XDNJ9RNkNzE5RIG1g86KE+aKlcrKMaigqAiuIy2PSnjkQeGk8 ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem deleted file mode 100644 index 89cc445e1..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/server.pem +++ /dev/null @@ -1,18 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIC/DCCAeagAwIBAgIQMUILcXtvmSOK63zEBo0VXzALBgkqhkiG9w0BAQswFjEU -MBIGA1UEChMLQm9vdDJEb2NrZXIwHhcNMTQxMDE2MjAyMTQ2WhcNMTcwOTMwMjAy -MTQ2WjAWMRQwEgYDVQQKEwtCb290MkRvY2tlcjCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBANxUOUhNnqFnrTlLsBYzfFRZWQo268l+4K4lOJCVbfDonP3g -Mz0vGi9fcyFqEWSA8Y+ShXna625HTnReCwFdsu0861qCIq7v95hFFCyOe0iIxpd0 -AKLnl90d+1vonE7andgFgoobbTiMly4UK4H6z8D148fFNIihoteOG3PIF89TFxP7 -CJ/3wXnx/IKpdlO8PAnub3tBPJHvGDj7KORLy4IBxRX5VBAdfGNybE66fcrehEva -rLA4m9pgiaR/Nnr9FdKhPyqYdjflLNvzydxNvMIV4M0hFlhXmYvpMjA5/XsTnsyV -t9JHJa5Upwqsbne08t7rsm7liZNxZlko8xPOTQcCAwEAAaNKMEgwDgYDVR0PAQH/ -BAQDAgCgMAwGA1UdEwEB/wQCMAAwKAYDVR0RBCEwH4ILYm9vdDJkb2NrZXKHBH8A 
-AAGHBAoAAg+HBMCoO2cwCwYJKoZIhvcNAQELA4IBAQAYoYcDkDWkl73FZ0WnPmAj -LiF7HU95Qg3KyEpFsAJeShSLPPbQntmwhdekEzY4tQ3eKQB/+zHFjzsCr/lmDUmH -Ea/ryQ17C+jyH+Ykg0IWW6L6veZhvRDg6Z9focVtPVBRxPTqC/Qhb54blWRASV+W -UreMuXQ5+1dQptAM7ixOeLVHjBi/bd9TL3jvwBVCr9QedteMjjK4TCF9Tbcou+MF -2w3OJJZMDhcD+YwoK9uJDqlKmcTm/vVMbSsp/pTMcnQ7jxCeR8/XyX+VwTZwaHAa -o92Q/eg3THAiWhvyT/SzyH9dHHBAyXynUwGCggKawHktfvW4QXRPuLxLrJ7iB5cy ------END CERTIFICATE----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem deleted file mode 100644 index c897e5da5..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/data/serverkey.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEoAIBAAKCAQEA3FQ5SE2eoWetOUuwFjN8VFlZCjbryX7griU4kJVt8Oic/eAz -PS8aL19zIWoRZIDxj5KFedrrbkdOdF4LAV2y7TzrWoIiru/3mEUULI57SIjGl3QA -oueX3R37W+icTtqd2AWCihttOIyXLhQrgfrPwPXjx8U0iKGi144bc8gXz1MXE/sI -n/fBefH8gql2U7w8Ce5ve0E8ke8YOPso5EvLggHFFflUEB18Y3JsTrp9yt6ES9qs -sDib2mCJpH82ev0V0qE/Kph2N+Us2/PJ3E28whXgzSEWWFeZi+kyMDn9exOezJW3 -0kclrlSnCqxud7Ty3uuybuWJk3FmWSjzE85NBwIDAQABAoIBAG0ak+cW8LeShHf7 -3+2Of0GxoOLrAWWdG5uAuPr31CJYve0FybnBimDtDjD8ujIfm/7xmoEWBEFutA3x -x9dcU88gvJbsHEqub9gKVQwfXjMz78tt2SbSMiR/xUnk7QorPcCMMfE71aEMFYzu -1gCed6Rg3vO81t/V0rKVH0j9S7UQz5v/oX15eVDV5LOqyCHwAi6K0eXXbqnbI0TH -SOQ/nexM2msVXWbO9t6ra6f5V7FXziDK5Xi+rPxRbX9mkrDzxDAevfuRqYBx5vtL -W2Q2hKjUAHFgXFniNSZBS7dCdAtz0el/3ct+cNmpuTMhhs7M6wC1CuYiZ/DxLiFh -Si73VckCgYEA+/ceh3+VjtQ0rgEw8sD9bqYEA8IaBiObjneIoFnKBYRG7yZd8JMm -HD4M/aQ1qhcRLPN7GR03YQULgQJURbKSjJHnhfTXHyeHC3NN4gMVHQXewu2MHCh6 -7FCQ9CfK0KcYLgegVVvL3PrF3hyWGnmTu+G0UkDQRYVnaNrB7snrW6UCgYEA39tq -+MCQdu0moJ5szSZf02undg9EeW6isk9qzi7TId3/MLci2eH7PEnipipPUK3+DERq -aba0y0TKgBR2EXvXLFJA/+kfdo2loIEHOfox85HVfxgUaFRti63ZI0uF8D0QT2Yy -oJal+RFghVoSnv4LjhRKEPbIkScTXGjdK+7wFjsCfz79iKRXQQx0ALd/lL0bgkAn -QNmvrNHcFQeI2p8700WNzC39aX67SsvEt3qxkrjzC1gxhpTAuReIK1gVPPwvqHN8 -BmV20FD5kMlMCix2mNCopwgUWvKvLAvoGFTxncKMA39+aJbuXAjiqJTekKgNvOE7 -i9kEWw0GTNPp3JHV6QECgYAPwb0M11kT1euDIMOdyRazpf86kyaJuZzgGjD1ZFxe -JOcigbGFTp/FhZnbglzk2+pm6KXo3QBq0mPCki4hWusxZnTGzpz1VlETNCHTFeZQ -M7KoaIR/N3oie9Et59H8r/+m5xWnMhNqratyl316DX24uXrhKM3DUdHODl+LCR2D -IwKBgE1MbHuwolUPEw3HeO4R7NMFVTFei7E/fpUsimPfArGg8UydwvloNT1myJos -N2JzfGGjN2KPVcBk9fOs71mJ6VcK3C3g5JIccplk6h9VNaw55+zdQvKPTzoBoTvy -A+Fwx2AlF61KeRF87DL2YTRJ6B9MHmWgf7+GVZOxomLgEAcZ ------END RSA PRIVATE KEY----- diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go deleted file mode 100644 index 46f6b6721..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go +++ /dev/null @@ -1,1087 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package testing provides a fake implementation of the Docker API, useful for -// testing purpose. 
-package testing
-
-import (
- "archive/tar"
- "crypto/rand"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- mathrand "math/rand"
- "net"
- "net/http"
- "regexp"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/fsouza/go-dockerclient"
- "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
- "github.com/fsouza/go-dockerclient/external/github.com/gorilla/mux"
-)
-
-var nameRegexp = regexp.MustCompile(`^[a-zA-Z0-9][a-zA-Z0-9_.-]+$`)
-
-// DockerServer represents a programmable, concurrent (not much), HTTP server
-// implementing a fake version of the Docker remote API.
-//
-// It can be used in standalone mode, listening for connections, or as an
-// arbitrary HTTP handler.
-//
-// For more details on the remote API, check http://goo.gl/G3plxW.
-type DockerServer struct {
- containers []*docker.Container
- execs []*docker.ExecInspect
- execMut sync.RWMutex
- cMut sync.RWMutex
- images []docker.Image
- iMut sync.RWMutex
- imgIDs map[string]string
- networks []*docker.Network
- netMut sync.RWMutex
- listener net.Listener
- mux *mux.Router
- hook func(*http.Request)
- failures map[string]string
- multiFailures []map[string]string
- execCallbacks map[string]func()
- statsCallbacks map[string]func(string) docker.Stats
- customHandlers map[string]http.Handler
- handlerMutex sync.RWMutex
- cChan chan<- *docker.Container
-}
-
-// NewServer returns a new instance of the fake server, in standalone mode. Use
-// the URL method to get the URL of the server.
-//
-// It receives the bind address (use 127.0.0.1:0 to get an available port on
-// the host), a channel of containers, and a hook function that will be called
-// on every request.
-//
-// The fake server will send containers in the channel whenever the container
-// changes its state via the HTTP API (i.e. create, start and stop). This
-// channel may be nil, which means that the server won't notify on state
-// changes.
-func NewServer(bind string, containerChan chan<- *docker.Container, hook func(*http.Request)) (*DockerServer, error) { - listener, err := net.Listen("tcp", bind) - if err != nil { - return nil, err - } - server := DockerServer{ - listener: listener, - imgIDs: make(map[string]string), - hook: hook, - failures: make(map[string]string), - execCallbacks: make(map[string]func()), - statsCallbacks: make(map[string]func(string) docker.Stats), - customHandlers: make(map[string]http.Handler), - cChan: containerChan, - } - server.buildMuxer() - go http.Serve(listener, &server) - return &server, nil -} - -func (s *DockerServer) notify(container *docker.Container) { - if s.cChan != nil { - s.cChan <- container - } -} - -func (s *DockerServer) buildMuxer() { - s.mux = mux.NewRouter() - s.mux.Path("/commit").Methods("POST").HandlerFunc(s.handlerWrapper(s.commitContainer)) - s.mux.Path("/containers/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listContainers)) - s.mux.Path("/containers/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.createContainer)) - s.mux.Path("/containers/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectContainer)) - s.mux.Path("/containers/{id:.*}/rename").Methods("POST").HandlerFunc(s.handlerWrapper(s.renameContainer)) - s.mux.Path("/containers/{id:.*}/top").Methods("GET").HandlerFunc(s.handlerWrapper(s.topContainer)) - s.mux.Path("/containers/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startContainer)) - s.mux.Path("/containers/{id:.*}/kill").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer)) - s.mux.Path("/containers/{id:.*}/stop").Methods("POST").HandlerFunc(s.handlerWrapper(s.stopContainer)) - s.mux.Path("/containers/{id:.*}/pause").Methods("POST").HandlerFunc(s.handlerWrapper(s.pauseContainer)) - s.mux.Path("/containers/{id:.*}/unpause").Methods("POST").HandlerFunc(s.handlerWrapper(s.unpauseContainer)) - s.mux.Path("/containers/{id:.*}/wait").Methods("POST").HandlerFunc(s.handlerWrapper(s.waitContainer)) - s.mux.Path("/containers/{id:.*}/attach").Methods("POST").HandlerFunc(s.handlerWrapper(s.attachContainer)) - s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer)) - s.mux.Path("/containers/{id:.*}/exec").Methods("POST").HandlerFunc(s.handlerWrapper(s.createExecContainer)) - s.mux.Path("/containers/{id:.*}/stats").Methods("GET").HandlerFunc(s.handlerWrapper(s.statsContainer)) - s.mux.Path("/exec/{id:.*}/resize").Methods("POST").HandlerFunc(s.handlerWrapper(s.resizeExecContainer)) - s.mux.Path("/exec/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startExecContainer)) - s.mux.Path("/exec/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectExecContainer)) - s.mux.Path("/images/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.pullImage)) - s.mux.Path("/build").Methods("POST").HandlerFunc(s.handlerWrapper(s.buildImage)) - s.mux.Path("/images/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listImages)) - s.mux.Path("/images/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeImage)) - s.mux.Path("/images/{name:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectImage)) - s.mux.Path("/images/{name:.*}/push").Methods("POST").HandlerFunc(s.handlerWrapper(s.pushImage)) - s.mux.Path("/images/{name:.*}/tag").Methods("POST").HandlerFunc(s.handlerWrapper(s.tagImage)) - s.mux.Path("/events").Methods("GET").HandlerFunc(s.listEvents) - s.mux.Path("/_ping").Methods("GET").HandlerFunc(s.handlerWrapper(s.pingDocker)) 
- s.mux.Path("/images/load").Methods("POST").HandlerFunc(s.handlerWrapper(s.loadImage))
- s.mux.Path("/images/{id:.*}/get").Methods("GET").HandlerFunc(s.handlerWrapper(s.getImage))
- s.mux.Path("/networks").Methods("GET").HandlerFunc(s.handlerWrapper(s.listNetworks))
- s.mux.Path("/networks/{id:.*}").Methods("GET").HandlerFunc(s.handlerWrapper(s.networkInfo))
- s.mux.Path("/networks").Methods("POST").HandlerFunc(s.handlerWrapper(s.createNetwork))
-}
-
-// SetHook changes the hook function used by the server.
-//
-// The hook function is a function called on every request.
-func (s *DockerServer) SetHook(hook func(*http.Request)) {
- s.hook = hook
-}
-
-// PrepareExec adds a callback to a container exec in the fake server.
-//
-// This function will be called whenever the given exec id is started, and the
-// given exec id will remain in the "Running" state while the function is
-// running, so it's useful for emulating an exec that runs for two seconds, for
-// example:
-//
-// opts := docker.CreateExecOptions{
-// AttachStdin: true,
-// AttachStdout: true,
-// AttachStderr: true,
-// Tty: true,
-// Cmd: []string{"/bin/bash", "-l"},
-// }
-// // Client points to a fake server.
-// exec, err := client.CreateExec(opts)
-// // handle error
-// server.PrepareExec(exec.ID, func() {time.Sleep(2 * time.Second)})
-// err = client.StartExec(exec.ID, docker.StartExecOptions{Tty: true}) // will block for 2 seconds
-// // handle error
-func (s *DockerServer) PrepareExec(id string, callback func()) {
- s.execCallbacks[id] = callback
-}
-
-// PrepareStats adds a callback that will be called for each container stats
-// call.
-//
-// This callback function will be called multiple times if stream is set to
-// true when stats is called.
-func (s *DockerServer) PrepareStats(id string, callback func(string) docker.Stats) {
- s.statsCallbacks[id] = callback
-}
-
-// PrepareFailure adds a new expected failure based on a URL regexp. It
-// receives an id for the failure.
-func (s *DockerServer) PrepareFailure(id string, urlRegexp string) {
- s.failures[id] = urlRegexp
-}
-
-// PrepareMultiFailures enqueues a new expected failure based on a URL regexp.
-// It receives an id for the failure.
-func (s *DockerServer) PrepareMultiFailures(id string, urlRegexp string) {
- s.multiFailures = append(s.multiFailures, map[string]string{"error": id, "url": urlRegexp})
-}
-
-// ResetFailure removes an expected failure identified by the given id.
-func (s *DockerServer) ResetFailure(id string) {
- delete(s.failures, id)
-}
-
-// ResetMultiFailures removes all enqueued failures.
-func (s *DockerServer) ResetMultiFailures() {
- s.multiFailures = []map[string]string{}
-}
-
-// CustomHandler registers a custom handler for a specific path.
-//
-// For example:
-//
-// server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-// http.Error(w, "Something wrong is not right", http.StatusInternalServerError)
-// }))
-func (s *DockerServer) CustomHandler(path string, handler http.Handler) {
- s.handlerMutex.Lock()
- s.customHandlers[path] = handler
- s.handlerMutex.Unlock()
-}
-
-// MutateContainer changes the state of a container, returning an error if the
-// given id does not match any container "running" in the server.
-func (s *DockerServer) MutateContainer(id string, state docker.State) error { - for _, container := range s.containers { - if container.ID == id { - container.State = state - return nil - } - } - return errors.New("container not found") -} - -// Stop stops the server. -func (s *DockerServer) Stop() { - if s.listener != nil { - s.listener.Close() - } -} - -// URL returns the HTTP URL of the server. -func (s *DockerServer) URL() string { - if s.listener == nil { - return "" - } - return "http://" + s.listener.Addr().String() + "/" -} - -// ServeHTTP handles HTTP requests sent to the server. -func (s *DockerServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { - s.handlerMutex.RLock() - defer s.handlerMutex.RUnlock() - for re, handler := range s.customHandlers { - if m, _ := regexp.MatchString(re, r.URL.Path); m { - handler.ServeHTTP(w, r) - return - } - } - s.mux.ServeHTTP(w, r) - if s.hook != nil { - s.hook(r) - } -} - -// DefaultHandler returns default http.Handler mux, it allows customHandlers to -// call the default behavior if wanted. -func (s *DockerServer) DefaultHandler() http.Handler { - return s.mux -} - -func (s *DockerServer) handlerWrapper(f func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) { - return func(w http.ResponseWriter, r *http.Request) { - for errorID, urlRegexp := range s.failures { - matched, err := regexp.MatchString(urlRegexp, r.URL.Path) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - if !matched { - continue - } - http.Error(w, errorID, http.StatusBadRequest) - return - } - for i, failure := range s.multiFailures { - matched, err := regexp.MatchString(failure["url"], r.URL.Path) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - if !matched { - continue - } - http.Error(w, failure["error"], http.StatusBadRequest) - s.multiFailures = append(s.multiFailures[:i], s.multiFailures[i+1:]...) 
- return - } - f(w, r) - } -} - -func (s *DockerServer) listContainers(w http.ResponseWriter, r *http.Request) { - all := r.URL.Query().Get("all") - s.cMut.RLock() - result := make([]docker.APIContainers, 0, len(s.containers)) - for _, container := range s.containers { - if all == "1" || container.State.Running { - result = append(result, docker.APIContainers{ - ID: container.ID, - Image: container.Image, - Command: fmt.Sprintf("%s %s", container.Path, strings.Join(container.Args, " ")), - Created: container.Created.Unix(), - Status: container.State.String(), - Ports: container.NetworkSettings.PortMappingAPI(), - Names: []string{fmt.Sprintf("/%s", container.Name)}, - }) - } - } - s.cMut.RUnlock() - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(result) -} - -func (s *DockerServer) listImages(w http.ResponseWriter, r *http.Request) { - s.cMut.RLock() - result := make([]docker.APIImages, len(s.images)) - for i, image := range s.images { - result[i] = docker.APIImages{ - ID: image.ID, - Created: image.Created.Unix(), - } - for tag, id := range s.imgIDs { - if id == image.ID { - result[i].RepoTags = append(result[i].RepoTags, tag) - } - } - } - s.cMut.RUnlock() - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(result) -} - -func (s *DockerServer) findImage(id string) (string, error) { - s.iMut.RLock() - defer s.iMut.RUnlock() - image, ok := s.imgIDs[id] - if ok { - return image, nil - } - image, _, err := s.findImageByID(id) - return image, err -} - -func (s *DockerServer) findImageByID(id string) (string, int, error) { - s.iMut.RLock() - defer s.iMut.RUnlock() - for i, image := range s.images { - if image.ID == id { - return image.ID, i, nil - } - } - return "", -1, errors.New("No such image") -} - -func (s *DockerServer) createContainer(w http.ResponseWriter, r *http.Request) { - var config struct { - *docker.Config - HostConfig *docker.HostConfig - } - defer r.Body.Close() - err := json.NewDecoder(r.Body).Decode(&config) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - name := r.URL.Query().Get("name") - if name != "" && !nameRegexp.MatchString(name) { - http.Error(w, "Invalid container name", http.StatusInternalServerError) - return - } - if _, err := s.findImage(config.Image); err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - ports := map[docker.Port][]docker.PortBinding{} - for port := range config.ExposedPorts { - ports[port] = []docker.PortBinding{{ - HostIP: "0.0.0.0", - HostPort: strconv.Itoa(mathrand.Int() % 65536), - }} - } - - //the container may not have cmd when using a Dockerfile - var path string - var args []string - if len(config.Cmd) == 1 { - path = config.Cmd[0] - } else if len(config.Cmd) > 1 { - path = config.Cmd[0] - args = config.Cmd[1:] - } - - generatedID := s.generateID() - config.Config.Hostname = generatedID[:12] - container := docker.Container{ - Name: name, - ID: generatedID, - Created: time.Now(), - Path: path, - Args: args, - Config: config.Config, - HostConfig: config.HostConfig, - State: docker.State{ - Running: false, - Pid: mathrand.Int() % 50000, - ExitCode: 0, - StartedAt: time.Now(), - }, - Image: config.Image, - NetworkSettings: &docker.NetworkSettings{ - IPAddress: fmt.Sprintf("172.16.42.%d", mathrand.Int()%250+2), - IPPrefixLen: 24, - Gateway: "172.16.42.1", - Bridge: "docker0", - Ports: ports, - }, - } - s.cMut.Lock() - if container.Name != "" { - for _, c := 
range s.containers {
- if c.Name == container.Name {
- defer s.cMut.Unlock()
- http.Error(w, "there's already a container with this name", http.StatusConflict)
- return
- }
- }
- }
- s.containers = append(s.containers, &container)
- s.cMut.Unlock()
- w.WriteHeader(http.StatusCreated)
- s.notify(&container)
- var c = struct{ ID string }{ID: container.ID}
- json.NewEncoder(w).Encode(c)
-}
-
-func (s *DockerServer) generateID() string {
- var buf [16]byte
- rand.Read(buf[:])
- return fmt.Sprintf("%x", buf)
-}
-
-func (s *DockerServer) renameContainer(w http.ResponseWriter, r *http.Request) {
- id := mux.Vars(r)["id"]
- container, index, err := s.findContainer(id)
- if err != nil {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- }
- copy := *container
- copy.Name = r.URL.Query().Get("name")
- s.cMut.Lock()
- defer s.cMut.Unlock()
- if s.containers[index].ID == copy.ID {
- s.containers[index] = &copy
- }
- w.WriteHeader(http.StatusNoContent)
-}
-
-func (s *DockerServer) inspectContainer(w http.ResponseWriter, r *http.Request) {
- id := mux.Vars(r)["id"]
- container, _, err := s.findContainer(id)
- if err != nil {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- }
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(http.StatusOK)
- json.NewEncoder(w).Encode(container)
-}
-
-func (s *DockerServer) statsContainer(w http.ResponseWriter, r *http.Request) {
- id := mux.Vars(r)["id"]
- _, _, err := s.findContainer(id)
- if err != nil {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- }
- stream, _ := strconv.ParseBool(r.URL.Query().Get("stream"))
- callback := s.statsCallbacks[id]
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(http.StatusOK)
- encoder := json.NewEncoder(w)
- for {
- var stats docker.Stats
- if callback != nil {
- stats = callback(id)
- }
- encoder.Encode(stats)
- if !stream {
- break
- }
- }
-}
-
-func (s *DockerServer) topContainer(w http.ResponseWriter, r *http.Request) {
- id := mux.Vars(r)["id"]
- container, _, err := s.findContainer(id)
- if err != nil {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- }
- if !container.State.Running {
- w.WriteHeader(http.StatusInternalServerError)
- fmt.Fprintf(w, "Container %s is not running", id)
- return
- }
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(http.StatusOK)
- result := docker.TopResult{
- Titles: []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"},
- Processes: [][]string{
- {"root", "7535", "7516", "0", "03:20", "?", "00:00:00", container.Path + " " + strings.Join(container.Args, " ")},
- },
- }
- json.NewEncoder(w).Encode(result)
-}
-
-func (s *DockerServer) startContainer(w http.ResponseWriter, r *http.Request) {
- id := mux.Vars(r)["id"]
- container, _, err := s.findContainer(id)
- if err != nil {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- }
- s.cMut.Lock()
- defer s.cMut.Unlock()
- defer r.Body.Close()
- var hostConfig docker.HostConfig
- err = json.NewDecoder(r.Body).Decode(&hostConfig)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- container.HostConfig = &hostConfig
- if container.State.Running {
- http.Error(w, "", http.StatusNotModified)
- return
- }
- container.State.Running = true
- s.notify(container)
-}
-
-func (s *DockerServer) stopContainer(w http.ResponseWriter, r *http.Request) {
- id := mux.Vars(r)["id"]
- container, _, err := s.findContainer(id)
- if err != nil {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- }
-
s.cMut.Lock() - defer s.cMut.Unlock() - if !container.State.Running { - http.Error(w, "Container not running", http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusNoContent) - container.State.Running = false - s.notify(container) -} - -func (s *DockerServer) pauseContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - s.cMut.Lock() - defer s.cMut.Unlock() - if container.State.Paused { - http.Error(w, "Container already paused", http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusNoContent) - container.State.Paused = true -} - -func (s *DockerServer) unpauseContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - s.cMut.Lock() - defer s.cMut.Unlock() - if !container.State.Paused { - http.Error(w, "Container not paused", http.StatusBadRequest) - return - } - w.WriteHeader(http.StatusNoContent) - container.State.Paused = false -} - -func (s *DockerServer) attachContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - hijacker, ok := w.(http.Hijacker) - if !ok { - http.Error(w, "cannot hijack connection", http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/vnd.docker.raw-stream") - w.WriteHeader(http.StatusOK) - conn, _, err := hijacker.Hijack() - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - wg := sync.WaitGroup{} - if r.URL.Query().Get("stdin") == "1" { - wg.Add(1) - go func() { - ioutil.ReadAll(conn) - wg.Done() - }() - } - outStream := stdcopy.NewStdWriter(conn, stdcopy.Stdout) - if container.State.Running { - fmt.Fprintf(outStream, "Container is running\n") - } else { - fmt.Fprintf(outStream, "Container is not running\n") - } - fmt.Fprintln(outStream, "What happened?") - fmt.Fprintln(outStream, "Something happened") - wg.Wait() - if r.URL.Query().Get("stream") == "1" { - for { - time.Sleep(1e6) - s.cMut.RLock() - if !container.State.Running { - s.cMut.RUnlock() - break - } - s.cMut.RUnlock() - } - } - conn.Close() -} - -func (s *DockerServer) waitContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - for { - time.Sleep(1e6) - s.cMut.RLock() - if !container.State.Running { - s.cMut.RUnlock() - break - } - s.cMut.RUnlock() - } - result := map[string]int{"StatusCode": container.State.ExitCode} - json.NewEncoder(w).Encode(result) -} - -func (s *DockerServer) removeContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - force := r.URL.Query().Get("force") - _, index, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - if s.containers[index].State.Running && force != "1" { - msg := "Error: API error (406): Impossible to remove a running container, please stop it first" - http.Error(w, msg, http.StatusInternalServerError) - return - } - w.WriteHeader(http.StatusNoContent) - s.cMut.Lock() - defer s.cMut.Unlock() - s.containers[index] = s.containers[len(s.containers)-1] - s.containers = s.containers[:len(s.containers)-1] -} - -func 
(s *DockerServer) commitContainer(w http.ResponseWriter, r *http.Request) { - id := r.URL.Query().Get("container") - container, _, err := s.findContainer(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - var config *docker.Config - runConfig := r.URL.Query().Get("run") - if runConfig != "" { - config = new(docker.Config) - err = json.Unmarshal([]byte(runConfig), config) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - } - w.WriteHeader(http.StatusOK) - image := docker.Image{ - ID: "img-" + container.ID, - Parent: container.Image, - Container: container.ID, - Comment: r.URL.Query().Get("m"), - Author: r.URL.Query().Get("author"), - Config: config, - } - repository := r.URL.Query().Get("repo") - tag := r.URL.Query().Get("tag") - s.iMut.Lock() - s.images = append(s.images, image) - if repository != "" { - if tag != "" { - repository += ":" + tag - } - s.imgIDs[repository] = image.ID - } - s.iMut.Unlock() - fmt.Fprintf(w, `{"ID":%q}`, image.ID) -} - -func (s *DockerServer) findContainer(idOrName string) (*docker.Container, int, error) { - s.cMut.RLock() - defer s.cMut.RUnlock() - for i, container := range s.containers { - if container.ID == idOrName || container.Name == idOrName { - return container, i, nil - } - } - return nil, -1, errors.New("No such container") -} - -func (s *DockerServer) buildImage(w http.ResponseWriter, r *http.Request) { - if ct := r.Header.Get("Content-Type"); ct == "application/tar" { - gotDockerFile := false - tr := tar.NewReader(r.Body) - for { - header, err := tr.Next() - if err != nil { - break - } - if header.Name == "Dockerfile" { - gotDockerFile = true - } - } - if !gotDockerFile { - w.WriteHeader(http.StatusBadRequest) - w.Write([]byte("miss Dockerfile")) - return - } - } - //we did not use that Dockerfile to build image cause we are a fake Docker daemon - image := docker.Image{ - ID: s.generateID(), - Created: time.Now(), - } - - query := r.URL.Query() - repository := image.ID - if t := query.Get("t"); t != "" { - repository = t - } - s.iMut.Lock() - s.images = append(s.images, image) - s.imgIDs[repository] = image.ID - s.iMut.Unlock() - w.Write([]byte(fmt.Sprintf("Successfully built %s", image.ID))) -} - -func (s *DockerServer) pullImage(w http.ResponseWriter, r *http.Request) { - fromImageName := r.URL.Query().Get("fromImage") - tag := r.URL.Query().Get("tag") - image := docker.Image{ - ID: s.generateID(), - } - s.iMut.Lock() - s.images = append(s.images, image) - if fromImageName != "" { - if tag != "" { - fromImageName = fmt.Sprintf("%s:%s", fromImageName, tag) - } - s.imgIDs[fromImageName] = image.ID - } - s.iMut.Unlock() -} - -func (s *DockerServer) pushImage(w http.ResponseWriter, r *http.Request) { - name := mux.Vars(r)["name"] - tag := r.URL.Query().Get("tag") - if tag != "" { - name += ":" + tag - } - s.iMut.RLock() - if _, ok := s.imgIDs[name]; !ok { - s.iMut.RUnlock() - http.Error(w, "No such image", http.StatusNotFound) - return - } - s.iMut.RUnlock() - fmt.Fprintln(w, "Pushing...") - fmt.Fprintln(w, "Pushed") -} - -func (s *DockerServer) tagImage(w http.ResponseWriter, r *http.Request) { - name := mux.Vars(r)["name"] - s.iMut.RLock() - if _, ok := s.imgIDs[name]; !ok { - s.iMut.RUnlock() - http.Error(w, "No such image", http.StatusNotFound) - return - } - s.iMut.RUnlock() - s.iMut.Lock() - defer s.iMut.Unlock() - newRepo := r.URL.Query().Get("repo") - newTag := r.URL.Query().Get("tag") - if newTag != "" { - newRepo += ":" + newTag - } - s.imgIDs[newRepo] = s.imgIDs[name] - 
w.WriteHeader(http.StatusCreated)
-}
-
-func (s *DockerServer) removeImage(w http.ResponseWriter, r *http.Request) {
- id := mux.Vars(r)["id"]
- s.iMut.RLock()
- var tag string
- if img, ok := s.imgIDs[id]; ok {
- id, tag = img, id
- }
- var tags []string
- for tag, taggedID := range s.imgIDs {
- if taggedID == id {
- tags = append(tags, tag)
- }
- }
- s.iMut.RUnlock()
- _, index, err := s.findImageByID(id)
- if err != nil {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- }
- w.WriteHeader(http.StatusNoContent)
- s.iMut.Lock()
- defer s.iMut.Unlock()
- if len(tags) < 2 {
- s.images[index] = s.images[len(s.images)-1]
- s.images = s.images[:len(s.images)-1]
- }
- if tag != "" {
- delete(s.imgIDs, tag)
- }
-}
-
-func (s *DockerServer) inspectImage(w http.ResponseWriter, r *http.Request) {
- name := mux.Vars(r)["name"]
- s.iMut.RLock()
- defer s.iMut.RUnlock()
- if id, ok := s.imgIDs[name]; ok {
- for _, img := range s.images {
- if img.ID == id {
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(http.StatusOK)
- json.NewEncoder(w).Encode(img)
- return
- }
- }
- }
- http.Error(w, "not found", http.StatusNotFound)
-}
-
-func (s *DockerServer) listEvents(w http.ResponseWriter, r *http.Request) {
- w.Header().Set("Content-Type", "application/json")
- var events [][]byte
- count := mathrand.Intn(20)
- for i := 0; i < count; i++ {
- data, err := json.Marshal(s.generateEvent())
- if err != nil {
- w.WriteHeader(http.StatusInternalServerError)
- return
- }
- events = append(events, data)
- }
- w.WriteHeader(http.StatusOK)
- for _, d := range events {
- fmt.Fprintln(w, d)
- time.Sleep(time.Duration(mathrand.Intn(200)) * time.Millisecond)
- }
-}
-
-func (s *DockerServer) pingDocker(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusOK)
-}
-
-func (s *DockerServer) generateEvent() *docker.APIEvents {
- var eventType string
- switch mathrand.Intn(4) {
- case 0:
- eventType = "create"
- case 1:
- eventType = "start"
- case 2:
- eventType = "stop"
- case 3:
- eventType = "destroy"
- }
- return &docker.APIEvents{
- ID: s.generateID(),
- Status: eventType,
- From: "mybase:latest",
- Time: time.Now().Unix(),
- }
-}
-
-func (s *DockerServer) loadImage(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusOK)
-}
-
-func (s *DockerServer) getImage(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusOK)
- w.Header().Set("Content-Type", "application/tar")
-}
-
-func (s *DockerServer) createExecContainer(w http.ResponseWriter, r *http.Request) {
- id := mux.Vars(r)["id"]
- container, _, err := s.findContainer(id)
- if err != nil {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- }
- exec := docker.ExecInspect{
- ID: s.generateID(),
- Container: *container,
- }
- var params docker.CreateExecOptions
- err = json.NewDecoder(r.Body).Decode(&params)
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
- if len(params.Cmd) > 0 {
- exec.ProcessConfig.EntryPoint = params.Cmd[0]
- if len(params.Cmd) > 1 {
- exec.ProcessConfig.Arguments = params.Cmd[1:]
- }
- }
- s.execMut.Lock()
- s.execs = append(s.execs, &exec)
- s.execMut.Unlock()
- w.WriteHeader(http.StatusOK)
- w.Header().Set("Content-Type", "application/json")
- json.NewEncoder(w).Encode(map[string]string{"Id": exec.ID})
-}
-
-func (s *DockerServer) startExecContainer(w http.ResponseWriter, r *http.Request) {
- id := mux.Vars(r)["id"]
- if exec, err := s.getExec(id, false); err == nil {
- s.execMut.Lock()
- exec.Running = true
-
s.execMut.Unlock() - if callback, ok := s.execCallbacks[id]; ok { - callback() - delete(s.execCallbacks, id) - } else if callback, ok := s.execCallbacks["*"]; ok { - callback() - delete(s.execCallbacks, "*") - } - s.execMut.Lock() - exec.Running = false - s.execMut.Unlock() - w.WriteHeader(http.StatusOK) - return - } - w.WriteHeader(http.StatusNotFound) -} - -func (s *DockerServer) resizeExecContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - if _, err := s.getExec(id, false); err == nil { - w.WriteHeader(http.StatusOK) - return - } - w.WriteHeader(http.StatusNotFound) -} - -func (s *DockerServer) inspectExecContainer(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - if exec, err := s.getExec(id, true); err == nil { - w.WriteHeader(http.StatusOK) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(exec) - return - } - w.WriteHeader(http.StatusNotFound) -} - -func (s *DockerServer) getExec(id string, copy bool) (*docker.ExecInspect, error) { - s.execMut.RLock() - defer s.execMut.RUnlock() - for _, exec := range s.execs { - if exec.ID == id { - if copy { - cp := *exec - exec = &cp - } - return exec, nil - } - } - return nil, errors.New("exec not found") -} - -func (s *DockerServer) findNetwork(idOrName string) (*docker.Network, int, error) { - s.netMut.RLock() - defer s.netMut.RUnlock() - for i, network := range s.networks { - if network.ID == idOrName || network.Name == idOrName { - return network, i, nil - } - } - return nil, -1, errors.New("No such network") -} - -func (s *DockerServer) listNetworks(w http.ResponseWriter, r *http.Request) { - s.netMut.RLock() - result := make([]docker.Network, 0, len(s.networks)) - for _, network := range s.networks { - result = append(result, *network) - } - s.netMut.RUnlock() - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(result) -} - -func (s *DockerServer) networkInfo(w http.ResponseWriter, r *http.Request) { - id := mux.Vars(r)["id"] - network, _, err := s.findNetwork(id) - if err != nil { - http.Error(w, err.Error(), http.StatusNotFound) - return - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - json.NewEncoder(w).Encode(network) -} - -// isValidName validates configuration objects supported by libnetwork -func isValidName(name string) bool { - if name == "" || strings.Contains(name, ".") { - return false - } - return true -} - -func (s *DockerServer) createNetwork(w http.ResponseWriter, r *http.Request) { - var config *docker.CreateNetworkOptions - defer r.Body.Close() - err := json.NewDecoder(r.Body).Decode(&config) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - if !isValidName(config.Name) { - http.Error(w, "Invalid network name", http.StatusBadRequest) - return - } - if n, _, _ := s.findNetwork(config.Name); n != nil { - http.Error(w, "network already exists", http.StatusForbidden) - return - } - - generatedID := s.generateID() - network := docker.Network{ - Name: config.Name, - ID: generatedID, - Type: config.NetworkType, - } - s.netMut.Lock() - s.networks = append(s.networks, &network) - s.netMut.Unlock() - w.WriteHeader(http.StatusCreated) - var c = struct{ ID string }{ID: network.ID} - json.NewEncoder(w).Encode(c) -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go deleted file mode 100644 index 
bb24ce560..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go +++ /dev/null @@ -1,1843 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package testing - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/http/httptest" - "os" - "reflect" - "strings" - "sync" - "testing" - "time" - - "github.com/fsouza/go-dockerclient" -) - -func TestNewServer(t *testing.T) { - server, err := NewServer("127.0.0.1:0", nil, nil) - if err != nil { - t.Fatal(err) - } - defer server.listener.Close() - conn, err := net.Dial("tcp", server.listener.Addr().String()) - if err != nil { - t.Fatal(err) - } - conn.Close() -} - -func TestServerStop(t *testing.T) { - server, err := NewServer("127.0.0.1:0", nil, nil) - if err != nil { - t.Fatal(err) - } - server.Stop() - _, err = net.Dial("tcp", server.listener.Addr().String()) - if err == nil { - t.Error("Unexpected error when dialing to stopped server") - } -} - -func TestServerStopNoListener(t *testing.T) { - server := DockerServer{} - server.Stop() -} - -func TestServerURL(t *testing.T) { - server, err := NewServer("127.0.0.1:0", nil, nil) - if err != nil { - t.Fatal(err) - } - defer server.Stop() - url := server.URL() - if expected := "http://" + server.listener.Addr().String() + "/"; url != expected { - t.Errorf("DockerServer.URL(): Want %q. Got %q.", expected, url) - } -} - -func TestServerURLNoListener(t *testing.T) { - server := DockerServer{} - url := server.URL() - if url != "" { - t.Errorf("DockerServer.URL(): Expected empty URL on handler mode, got %q.", url) - } -} - -func TestHandleWithHook(t *testing.T) { - var called bool - server, _ := NewServer("127.0.0.1:0", nil, func(*http.Request) { called = true }) - defer server.Stop() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) - server.ServeHTTP(recorder, request) - if !called { - t.Error("ServeHTTP did not call the hook function.") - } -} - -func TestSetHook(t *testing.T) { - var called bool - server, _ := NewServer("127.0.0.1:0", nil, nil) - defer server.Stop() - server.SetHook(func(*http.Request) { called = true }) - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) - server.ServeHTTP(recorder, request) - if !called { - t.Error("ServeHTTP did not call the hook function.") - } -} - -func TestCustomHandler(t *testing.T) { - var called bool - server, _ := NewServer("127.0.0.1:0", nil, nil) - addContainers(server, 2) - server.CustomHandler("/containers/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - called = true - fmt.Fprint(w, "Hello world") - })) - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) - server.ServeHTTP(recorder, request) - if !called { - t.Error("Did not call the custom handler") - } - if got := recorder.Body.String(); got != "Hello world" { - t.Errorf("Wrong output for custom handler: want %q. 
Got %q.", "Hello world", got) - } -} - -func TestCustomHandlerRegexp(t *testing.T) { - var called bool - server, _ := NewServer("127.0.0.1:0", nil, nil) - addContainers(server, 2) - server.CustomHandler("/containers/.*/json", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - called = true - fmt.Fprint(w, "Hello world") - })) - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/.*/json?all=1", nil) - server.ServeHTTP(recorder, request) - if !called { - t.Error("Did not call the custom handler") - } - if got := recorder.Body.String(); got != "Hello world" { - t.Errorf("Wrong output for custom handler: want %q. Got %q.", "Hello world", got) - } -} - -func TestListContainers(t *testing.T) { - server := DockerServer{} - addContainers(&server, 2) - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("ListContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - expected := make([]docker.APIContainers, 2) - for i, container := range server.containers { - expected[i] = docker.APIContainers{ - ID: container.ID, - Image: container.Image, - Command: strings.Join(container.Config.Cmd, " "), - Created: container.Created.Unix(), - Status: container.State.String(), - Ports: container.NetworkSettings.PortMappingAPI(), - Names: []string{"/" + container.Name}, - } - } - var got []docker.APIContainers - err := json.NewDecoder(recorder.Body).Decode(&got) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, expected) { - t.Errorf("ListContainers. Want %#v. Got %#v.", expected, got) - } -} - -func TestListRunningContainers(t *testing.T) { - server := DockerServer{} - addContainers(&server, 2) - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/json?all=0", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("ListRunningContainers: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - var got []docker.APIContainers - err := json.NewDecoder(recorder.Body).Decode(&got) - if err != nil { - t.Fatal(err) - } - if len(got) != 0 { - t.Errorf("ListRunningContainers: Want 0. Got %d.", len(got)) - } -} - -func TestCreateContainer(t *testing.T) { - server := DockerServer{} - server.imgIDs = map[string]string{"base": "a1234"} - server.buildMuxer() - recorder := httptest.NewRecorder() - body := `{"Hostname":"", "User":"ubuntu", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, -"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":"","HostConfig":{"Binds":["/var/run/docker.sock:/var/run/docker.sock:rw"]}}` - request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusCreated { - t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) - } - var returned docker.Container - err := json.NewDecoder(recorder.Body).Decode(&returned) - if err != nil { - t.Fatal(err) - } - stored := server.containers[0] - if returned.ID != stored.ID { - t.Errorf("CreateContainer: ID mismatch. Stored: %q. 
Returned: %q.", stored.ID, returned.ID) - } - if stored.State.Running { - t.Errorf("CreateContainer should not set container to running state.") - } - if stored.Config.User != "ubuntu" { - t.Errorf("CreateContainer: wrong config. Expected: %q. Returned: %q.", "ubuntu", stored.Config.User) - } - if stored.Config.Hostname != returned.ID[:12] { - t.Errorf("CreateContainer: wrong hostname. Expected: %q. Returned: %q.", returned.ID[:12], stored.Config.Hostname) - } - expectedBind := []string{"/var/run/docker.sock:/var/run/docker.sock:rw"} - if !reflect.DeepEqual(stored.HostConfig.Binds, expectedBind) { - t.Errorf("CreateContainer: wrong host config. Expected: %v. Returned %v.", expectedBind, stored.HostConfig.Binds) - } -} - -func TestCreateContainerWithNotifyChannel(t *testing.T) { - ch := make(chan *docker.Container, 1) - server := DockerServer{} - server.imgIDs = map[string]string{"base": "a1234"} - server.cChan = ch - server.buildMuxer() - recorder := httptest.NewRecorder() - body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, -"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":""}` - request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusCreated { - t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) - } - if notified := <-ch; notified != server.containers[0] { - t.Errorf("CreateContainer: did not notify the proper container. Want %q. Got %q.", server.containers[0].ID, notified.ID) - } -} - -func TestCreateContainerInvalidBody(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader("whaaaaaat---")) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) - } -} - -func TestCreateContainerDuplicateName(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - server.imgIDs = map[string]string{"base": "a1234"} - addContainers(&server, 1) - server.containers[0].Name = "mycontainer" - recorder := httptest.NewRecorder() - body := `{"Hostname":"", "User":"ubuntu", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, -"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":"","HostConfig":{"Binds":["/var/run/docker.sock:/var/run/docker.sock:rw"]}}` - request, _ := http.NewRequest("POST", "/containers/create?name=mycontainer", strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusConflict { - t.Errorf("CreateContainer: wrong status. Want %d. 
Got %d.", http.StatusConflict, recorder.Code) - } -} - -func TestCreateMultipleContainersEmptyName(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - server.imgIDs = map[string]string{"base": "a1234"} - addContainers(&server, 1) - server.containers[0].Name = "" - recorder := httptest.NewRecorder() - body := `{"Hostname":"", "User":"ubuntu", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, -"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], "Image":"base", "Volumes":{}, "VolumesFrom":"","HostConfig":{"Binds":["/var/run/docker.sock:/var/run/docker.sock:rw"]}}` - request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusCreated { - t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) - } - var returned docker.Container - err := json.NewDecoder(recorder.Body).Decode(&returned) - if err != nil { - t.Fatal(err) - } - stored := server.containers[1] - if returned.ID != stored.ID { - t.Errorf("CreateContainer: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned.ID) - } - if stored.State.Running { - t.Errorf("CreateContainer should not set container to running state.") - } - if stored.Config.User != "ubuntu" { - t.Errorf("CreateContainer: wrong config. Expected: %q. Returned: %q.", "ubuntu", stored.Config.User) - } - expectedBind := []string{"/var/run/docker.sock:/var/run/docker.sock:rw"} - if !reflect.DeepEqual(stored.HostConfig.Binds, expectedBind) { - t.Errorf("CreateContainer: wrong host config. Expected: %v. Returned %v.", expectedBind, stored.HostConfig.Binds) - } -} - -func TestCreateContainerInvalidName(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, -"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], -"Image":"base", "Volumes":{}, "VolumesFrom":""}` - request, _ := http.NewRequest("POST", "/containers/create?name=myapp/container1", strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusInternalServerError { - t.Errorf("CreateContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code) - } - expectedBody := "Invalid container name\n" - if got := recorder.Body.String(); got != expectedBody { - t.Errorf("CreateContainer: wrong body. Want %q. Got %q.", expectedBody, got) - } -} - -func TestCreateContainerImageNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - body := `{"Hostname":"", "User":"", "Memory":0, "MemorySwap":0, "AttachStdin":false, "AttachStdout":true, "AttachStderr":true, -"PortSpecs":null, "Tty":false, "OpenStdin":false, "StdinOnce":false, "Env":null, "Cmd":["date"], -"Image":"base", "Volumes":{}, "VolumesFrom":""}` - request, _ := http.NewRequest("POST", "/containers/create", strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("CreateContainer: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestRenameContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 2) - server.buildMuxer() - recorder := httptest.NewRecorder() - newName := server.containers[0].Name + "abc" - path := fmt.Sprintf("/containers/%s/rename?name=%s", server.containers[0].ID, newName) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("RenameContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) - } - container := server.containers[0] - if container.Name != newName { - t.Errorf("RenameContainer: did not rename the container. Want %q. Got %q.", newName, container.Name) - } -} - -func TestRenameContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/containers/blabla/rename?name=something", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("RenameContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestCommitContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 2) - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("CommitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - expected := fmt.Sprintf(`{"ID":"%s"}`, server.images[0].ID) - if got := recorder.Body.String(); got != expected { - t.Errorf("CommitContainer: wrong response body. Want %q. Got %q.", expected, got) - } -} - -func TestCommitContainerComplete(t *testing.T) { - server := DockerServer{} - server.imgIDs = make(map[string]string) - addContainers(&server, 2) - server.buildMuxer() - recorder := httptest.NewRecorder() - queryString := "container=" + server.containers[0].ID + "&repo=tsuru/python&m=saving&author=developers" - queryString += `&run={"Cmd": ["cat", "/world"],"PortSpecs":["22"]}` - request, _ := http.NewRequest("POST", "/commit?"+queryString, nil) - server.ServeHTTP(recorder, request) - image := server.images[0] - if image.Parent != server.containers[0].Image { - t.Errorf("CommitContainer: wrong parent image. Want %q. Got %q.", server.containers[0].Image, image.Parent) - } - if image.Container != server.containers[0].ID { - t.Errorf("CommitContainer: wrong container. Want %q. Got %q.", server.containers[0].ID, image.Container) - } - message := "saving" - if image.Comment != message { - t.Errorf("CommitContainer: wrong comment (commit message). Want %q. Got %q.", message, image.Comment) - } - author := "developers" - if image.Author != author { - t.Errorf("CommitContainer: wrong author. Want %q. Got %q.", author, image.Author) - } - if id := server.imgIDs["tsuru/python"]; id != image.ID { - t.Errorf("CommitContainer: wrong ID saved for repository. Want %q. Got %q.", image.ID, id) - } - portSpecs := []string{"22"} - if !reflect.DeepEqual(image.Config.PortSpecs, portSpecs) { - t.Errorf("CommitContainer: wrong port spec in config. Want %#v. Got %#v.", portSpecs, image.Config.PortSpecs) - } - cmd := []string{"cat", "/world"} - if !reflect.DeepEqual(image.Config.Cmd, cmd) { - t.Errorf("CommitContainer: wrong cmd in config. Want %#v. 
Got %#v.", cmd, image.Config.Cmd) - } -} - -func TestCommitContainerWithTag(t *testing.T) { - server := DockerServer{} - server.imgIDs = make(map[string]string) - addContainers(&server, 2) - server.buildMuxer() - recorder := httptest.NewRecorder() - queryString := "container=" + server.containers[0].ID + "&repo=tsuru/python&tag=v1" - request, _ := http.NewRequest("POST", "/commit?"+queryString, nil) - server.ServeHTTP(recorder, request) - image := server.images[0] - if image.Parent != server.containers[0].Image { - t.Errorf("CommitContainer: wrong parent image. Want %q. Got %q.", server.containers[0].Image, image.Parent) - } - if image.Container != server.containers[0].ID { - t.Errorf("CommitContainer: wrong container. Want %q. Got %q.", server.containers[0].ID, image.Container) - } - if id := server.imgIDs["tsuru/python:v1"]; id != image.ID { - t.Errorf("CommitContainer: wrong ID saved for repository. Want %q. Got %q.", image.ID, id) - } -} - -func TestCommitContainerInvalidRun(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/commit?container="+server.containers[0].ID+"&run=abc---", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("CommitContainer. Wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) - } -} - -func TestCommitContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/commit?container=abc123", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("CommitContainer. Wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestInspectContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 2) - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/json", server.containers[0].ID) - request, _ := http.NewRequest("GET", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("InspectContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - expected := server.containers[0] - var got docker.Container - err := json.NewDecoder(recorder.Body).Decode(&got) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got.Config, expected.Config) { - t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) - } - if !reflect.DeepEqual(got.NetworkSettings, expected.NetworkSettings) { - t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) - } - got.State.StartedAt = expected.State.StartedAt - got.State.FinishedAt = expected.State.FinishedAt - got.Config = expected.Config - got.Created = expected.Created - got.NetworkSettings = expected.NetworkSettings - if !reflect.DeepEqual(got, *expected) { - t.Errorf("InspectContainer: wrong value. Want %#v. Got %#v.", *expected, got) - } -} - -func TestInspectContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/abc123/json", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("InspectContainer: wrong status code. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestTopContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Running = true - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/top", server.containers[0].ID) - request, _ := http.NewRequest("GET", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - var got docker.TopResult - err := json.NewDecoder(recorder.Body).Decode(&got) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got.Titles, []string{"UID", "PID", "PPID", "C", "STIME", "TTY", "TIME", "CMD"}) { - t.Fatalf("TopContainer: Unexpected titles, got: %#v", got.Titles) - } - if len(got.Processes) != 1 { - t.Fatalf("TopContainer: Unexpected process len, got: %d", len(got.Processes)) - } - if got.Processes[0][len(got.Processes[0])-1] != "ls -la .." { - t.Fatalf("TopContainer: Unexpected command name, got: %s", got.Processes[0][len(got.Processes[0])-1]) - } -} - -func TestTopContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/xyz/top", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestTopContainerStopped(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/top", server.containers[0].ID) - request, _ := http.NewRequest("GET", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusInternalServerError { - t.Errorf("TopContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code) - } -} - -func TestStartContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.buildMuxer() - memory := int64(536870912) - hostConfig := docker.HostConfig{Memory: memory} - configBytes, err := json.Marshal(hostConfig) - if err != nil { - t.Fatal(err) - } - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, bytes.NewBuffer(configBytes)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - if !server.containers[0].State.Running { - t.Error("StartContainer: did not set the container to running state") - } - if gotMemory := server.containers[0].HostConfig.Memory; gotMemory != memory { - t.Errorf("StartContainer: wrong HostConfig. Wants %d of memory. Got %d", memory, gotMemory) - } -} - -func TestStartContainerWithNotifyChannel(t *testing.T) { - ch := make(chan *docker.Container, 1) - server := DockerServer{} - server.cChan = ch - addContainers(&server, 1) - addContainers(&server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/start", server.containers[1].ID) - request, _ := http.NewRequest("POST", path, bytes.NewBuffer([]byte("{}"))) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("StartContainer: wrong status code. Want %d. 
Got %d.", http.StatusOK, recorder.Code) - } - if notified := <-ch; notified != server.containers[1] { - t.Errorf("StartContainer: did not notify the proper container. Want %q. Got %q.", server.containers[1].ID, notified.ID) - } -} - -func TestStartContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - path := "/containers/abc123/start" - request, _ := http.NewRequest("POST", path, bytes.NewBuffer([]byte("null"))) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestStartContainerAlreadyRunning(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Running = true - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/start", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, bytes.NewBuffer([]byte("null"))) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotModified { - t.Errorf("StartContainer: wrong status code. Want %d. Got %d.", http.StatusNotModified, recorder.Code) - } -} - -func TestStopContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Running = true - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) - } - if server.containers[0].State.Running { - t.Error("StopContainer: did not stop the container") - } -} - -func TestKillContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Running = true - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/kill", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("KillContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) - } - if server.containers[0].State.Running { - t.Error("KillContainer: did not stop the container") - } -} - -func TestStopContainerWithNotifyChannel(t *testing.T) { - ch := make(chan *docker.Container, 1) - server := DockerServer{} - server.cChan = ch - addContainers(&server, 1) - addContainers(&server, 1) - server.containers[1].State.Running = true - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/stop", server.containers[1].ID) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) - } - if notified := <-ch; notified != server.containers[1] { - t.Errorf("StopContainer: did not notify the proper container. Want %q. 
Got %q.", server.containers[1].ID, notified.ID) - } -} - -func TestStopContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - path := "/containers/abc123/stop" - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestStopContainerNotRunning(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/stop", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("StopContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) - } -} - -func TestPauseContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/pause", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) - } - if !server.containers[0].State.Paused { - t.Error("PauseContainer: did not pause the container") - } -} - -func TestPauseContainerAlreadyPaused(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Paused = true - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/pause", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) - } -} - -func TestPauseContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - path := "/containers/abc123/pause" - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("PauseContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestUnpauseContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Paused = true - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/unpause", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusNoContent, recorder.Code) - } - if server.containers[0].State.Paused { - t.Error("UnpauseContainer: did not unpause the container") - } -} - -func TestUnpauseContainerNotPaused(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/unpause", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("UnpauseContainer: wrong status code. Want %d. 
Got %d.", http.StatusBadRequest, recorder.Code) - } -} - -func TestUnpauseContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - path := "/containers/abc123/unpause" - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("UnpauseContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestWaitContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Running = true - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - go func() { - server.cMut.Lock() - server.containers[0].State.Running = false - server.cMut.Unlock() - }() - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - expected := `{"StatusCode":0}` + "\n" - if body := recorder.Body.String(); body != expected { - t.Errorf("WaitContainer: wrong body. Want %q. Got %q.", expected, body) - } -} - -func TestWaitContainerStatus(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.buildMuxer() - server.containers[0].State.ExitCode = 63 - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/wait", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("WaitContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - expected := `{"StatusCode":63}` + "\n" - if body := recorder.Body.String(); body != expected { - t.Errorf("WaitContainer: wrong body. Want %q. Got %q.", expected, body) - } -} - -func TestWaitContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - path := "/containers/abc123/wait" - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("WaitContainer: wrong status code. Want %d. Got %d.", http.StatusNotFound, recorder.Code) - } -} - -type HijackableResponseRecorder struct { - httptest.ResponseRecorder - readCh chan []byte -} - -func (r *HijackableResponseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) { - myConn, otherConn := net.Pipe() - r.readCh = make(chan []byte) - go func() { - data, _ := ioutil.ReadAll(myConn) - r.readCh <- data - }() - return otherConn, nil, nil -} - -func (r *HijackableResponseRecorder) HijackBuffer() string { - return string(<-r.readCh) -} - -func TestAttachContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Running = true - server.buildMuxer() - recorder := &HijackableResponseRecorder{} - path := fmt.Sprintf("/containers/%s/attach?logs=1", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - lines := []string{ - "\x01\x00\x00\x00\x00\x00\x00\x15Container is running", - "\x01\x00\x00\x00\x00\x00\x00\x0fWhat happened?", - "\x01\x00\x00\x00\x00\x00\x00\x13Something happened", - } - expected := strings.Join(lines, "\n") + "\n" - if body := recorder.HijackBuffer(); body != expected { - t.Errorf("AttachContainer: wrong body. Want %q. 
Got %q.", expected, body) - } -} - -func TestAttachContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := &HijackableResponseRecorder{} - path := "/containers/abc123/attach?logs=1" - request, _ := http.NewRequest("POST", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("AttachContainer: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestAttachContainerWithStreamBlocks(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Running = true - server.buildMuxer() - path := fmt.Sprintf("/containers/%s/attach?logs=1&stdout=1&stream=1", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, nil) - done := make(chan string) - go func() { - recorder := &HijackableResponseRecorder{} - server.ServeHTTP(recorder, request) - done <- recorder.HijackBuffer() - }() - select { - case <-done: - t.Fatalf("attach stream returned before container is stopped") - case <-time.After(500 * time.Millisecond): - } - server.cMut.Lock() - server.containers[0].State.Running = false - server.cMut.Unlock() - var body string - select { - case body = <-done: - case <-time.After(5 * time.Second): - t.Fatalf("timed out waiting for attach to finish") - } - lines := []string{ - "\x01\x00\x00\x00\x00\x00\x00\x15Container is running", - "\x01\x00\x00\x00\x00\x00\x00\x0fWhat happened?", - "\x01\x00\x00\x00\x00\x00\x00\x13Something happened", - } - expected := strings.Join(lines, "\n") + "\n" - if body != expected { - t.Errorf("AttachContainer: wrong body. Want %q. Got %q.", expected, body) - } -} - -func TestRemoveContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s", server.containers[0].ID) - request, _ := http.NewRequest("DELETE", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) - } - if len(server.containers) > 0 { - t.Error("RemoveContainer: did not remove the container.") - } -} - -func TestRemoveContainerByName(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s", server.containers[0].Name) - request, _ := http.NewRequest("DELETE", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) - } - if len(server.containers) > 0 { - t.Error("RemoveContainer: did not remove the container.") - } -} - -func TestRemoveContainerNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/abc123") - request, _ := http.NewRequest("DELETE", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("RemoveContainer: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestRemoveContainerRunning(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Running = true - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s", server.containers[0].ID) - request, _ := http.NewRequest("DELETE", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusInternalServerError { - t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusInternalServerError, recorder.Code) - } - if len(server.containers) < 1 { - t.Error("RemoveContainer: should not remove the container.") - } -} - -func TestRemoveContainerRunningForce(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.containers[0].State.Running = true - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s?%s", server.containers[0].ID, "force=1") - request, _ := http.NewRequest("DELETE", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("RemoveContainer: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) - } - if len(server.containers) > 0 { - t.Error("RemoveContainer: did not remove the container.") - } -} - -func TestPullImage(t *testing.T) { - server := DockerServer{imgIDs: make(map[string]string)} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/images/create?fromImage=base", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("PullImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - if len(server.images) != 1 { - t.Errorf("PullImage: Want 1 image. Got %d.", len(server.images)) - } - if _, ok := server.imgIDs["base"]; !ok { - t.Error("PullImage: Repository should not be empty.") - } -} - -func TestPullImageWithTag(t *testing.T) { - server := DockerServer{imgIDs: make(map[string]string)} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/images/create?fromImage=base&tag=tag", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("PullImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - if len(server.images) != 1 { - t.Errorf("PullImage: Want 1 image. Got %d.", len(server.images)) - } - if _, ok := server.imgIDs["base:tag"]; !ok { - t.Error("PullImage: Repository should not be empty.") - } -} - -func TestPushImage(t *testing.T) { - server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } -} - -func TestPushImageWithTag(t *testing.T) { - server := DockerServer{imgIDs: map[string]string{"tsuru/python:v1": "a123"}} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/images/tsuru/python/push?tag=v1", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("PushImage: wrong status. Want %d. 
Got %d.", http.StatusOK, recorder.Code) - } -} - -func TestPushImageNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/images/tsuru/python/push", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("PushImage: wrong status. Want %d. Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func TestTagImage(t *testing.T) { - server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/images/tsuru/python/tag?repo=tsuru/new-python", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusCreated { - t.Errorf("TagImage: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) - } - if server.imgIDs["tsuru/python"] != server.imgIDs["tsuru/new-python"] { - t.Errorf("TagImage: did not tag the image") - } -} - -func TestTagImageWithRepoAndTag(t *testing.T) { - server := DockerServer{imgIDs: map[string]string{"tsuru/python": "a123"}} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/images/tsuru/python/tag?repo=tsuru/new-python&tag=v1", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusCreated { - t.Errorf("TagImage: wrong status. Want %d. Got %d.", http.StatusCreated, recorder.Code) - } - if server.imgIDs["tsuru/python"] != server.imgIDs["tsuru/new-python:v1"] { - t.Errorf("TagImage: did not tag the image") - } -} - -func TestTagImageNotFound(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/images/tsuru/python/tag", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNotFound { - t.Errorf("TagImage: wrong status. Want %d. 
Got %d.", http.StatusNotFound, recorder.Code) - } -} - -func addContainers(server *DockerServer, n int) { - server.cMut.Lock() - defer server.cMut.Unlock() - for i := 0; i < n; i++ { - date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour) - container := docker.Container{ - Name: fmt.Sprintf("%x", rand.Int()%10000), - ID: fmt.Sprintf("%x", rand.Int()%10000), - Created: date, - Path: "ls", - Args: []string{"-la", ".."}, - Config: &docker.Config{ - Hostname: fmt.Sprintf("docker-%d", i), - AttachStdout: true, - AttachStderr: true, - Env: []string{"ME=you", fmt.Sprintf("NUMBER=%d", i)}, - Cmd: []string{"ls", "-la", ".."}, - Image: "base", - }, - State: docker.State{ - Running: false, - Pid: 400 + i, - ExitCode: 0, - StartedAt: date, - }, - Image: "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc", - NetworkSettings: &docker.NetworkSettings{ - IPAddress: fmt.Sprintf("10.10.10.%d", i+2), - IPPrefixLen: 24, - Gateway: "10.10.10.1", - Bridge: "docker0", - PortMapping: map[string]docker.PortMapping{ - "Tcp": {"8888": fmt.Sprintf("%d", 49600+i)}, - }, - }, - ResolvConfPath: "/etc/resolv.conf", - } - server.containers = append(server.containers, &container) - } -} - -func addImages(server *DockerServer, n int, repo bool) { - server.iMut.Lock() - defer server.iMut.Unlock() - if server.imgIDs == nil { - server.imgIDs = make(map[string]string) - } - for i := 0; i < n; i++ { - date := time.Now().Add(time.Duration((rand.Int() % (i + 1))) * time.Hour) - image := docker.Image{ - ID: fmt.Sprintf("%x", rand.Int()%10000), - Created: date, - } - server.images = append(server.images, image) - if repo { - repo := "docker/python-" + image.ID - server.imgIDs[repo] = image.ID - } - } -} - -func TestListImages(t *testing.T) { - server := DockerServer{} - addImages(&server, 2, true) - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/images/json?all=1", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("ListImages: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - expected := make([]docker.APIImages, 2) - for i, image := range server.images { - expected[i] = docker.APIImages{ - ID: image.ID, - Created: image.Created.Unix(), - RepoTags: []string{"docker/python-" + image.ID}, - } - } - var got []docker.APIImages - err := json.NewDecoder(recorder.Body).Decode(&got) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, expected) { - t.Errorf("ListImages. Want %#v. Got %#v.", expected, got) - } -} - -func TestRemoveImage(t *testing.T) { - server := DockerServer{} - addImages(&server, 1, false) - server.buildMuxer() - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/images/%s", server.images[0].ID) - request, _ := http.NewRequest("DELETE", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("RemoveImage: wrong status. Want %d. Got %d.", http.StatusNoContent, recorder.Code) - } - if len(server.images) > 0 { - t.Error("RemoveImage: did not remove the image.") - } -} - -func TestRemoveImageByName(t *testing.T) { - server := DockerServer{} - addImages(&server, 1, true) - server.buildMuxer() - recorder := httptest.NewRecorder() - imgName := "docker/python-" + server.images[0].ID - path := "/images/" + imgName - request, _ := http.NewRequest("DELETE", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusNoContent { - t.Errorf("RemoveImage: wrong status. Want %d. 
Got %d.", http.StatusNoContent, recorder.Code) - } - if len(server.images) > 0 { - t.Error("RemoveImage: did not remove the image.") - } - _, ok := server.imgIDs[imgName] - if ok { - t.Error("RemoveImage: did not remove image tag name.") - } -} - -func TestRemoveImageWithMultipleTags(t *testing.T) { - server := DockerServer{} - addImages(&server, 1, true) - server.buildMuxer() - imgID := server.images[0].ID - imgName := "docker/python-" + imgID - server.imgIDs["docker/python-wat"] = imgID - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/images/%s", imgName) - request, _ := http.NewRequest("DELETE", path, nil) - server.ServeHTTP(recorder, request) - _, ok := server.imgIDs[imgName] - if ok { - t.Error("RemoveImage: did not remove image tag name.") - } - id, ok := server.imgIDs["docker/python-wat"] - if !ok { - t.Error("RemoveImage: removed the wrong tag name.") - } - if id != imgID { - t.Error("RemoveImage: disassociated the wrong ID from the tag") - } - if len(server.images) < 1 { - t.Fatal("RemoveImage: removed the image, but should keep it") - } - if server.images[0].ID != imgID { - t.Error("RemoveImage: changed the ID of the image!") - } -} - -func TestPrepareFailure(t *testing.T) { - server := DockerServer{failures: make(map[string]string)} - server.buildMuxer() - errorID := "my_error" - server.PrepareFailure(errorID, "containers/json") - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) - } - if recorder.Body.String() != errorID+"\n" { - t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorID, recorder.Body.String()) - } -} - -func TestPrepareMultiFailures(t *testing.T) { - server := DockerServer{multiFailures: []map[string]string{}} - server.buildMuxer() - errorID := "multi error" - server.PrepareMultiFailures(errorID, "containers/json") - server.PrepareMultiFailures(errorID, "containers/json") - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) - } - if recorder.Body.String() != errorID+"\n" { - t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorID, recorder.Body.String()) - } - recorder = httptest.NewRecorder() - request, _ = http.NewRequest("GET", "/containers/json?all=1", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) - } - if recorder.Body.String() != errorID+"\n" { - t.Errorf("PrepareFailure: wrong message. Want %s. Got %s.", errorID, recorder.Body.String()) - } - recorder = httptest.NewRecorder() - request, _ = http.NewRequest("GET", "/containers/json?all=1", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - if recorder.Body.String() == errorID+"\n" { - t.Errorf("PrepareFailure: wrong message. Want %s. 
Got %s.", errorID, recorder.Body.String()) - } -} - -func TestRemoveFailure(t *testing.T) { - server := DockerServer{failures: make(map[string]string)} - server.buildMuxer() - errorID := "my_error" - server.PrepareFailure(errorID, "containers/json") - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/containers/json?all=1", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("PrepareFailure: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) - } - server.ResetFailure(errorID) - recorder = httptest.NewRecorder() - request, _ = http.NewRequest("GET", "/containers/json?all=1", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("RemoveFailure: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } -} - -func TestResetMultiFailures(t *testing.T) { - server := DockerServer{multiFailures: []map[string]string{}} - server.buildMuxer() - errorID := "multi error" - server.PrepareMultiFailures(errorID, "containers/json") - server.PrepareMultiFailures(errorID, "containers/json") - if len(server.multiFailures) != 2 { - t.Errorf("PrepareMultiFailures: error adding multi failures.") - } - server.ResetMultiFailures() - if len(server.multiFailures) != 0 { - t.Errorf("ResetMultiFailures: error reseting multi failures.") - } -} - -func TestMutateContainer(t *testing.T) { - server := DockerServer{failures: make(map[string]string)} - server.buildMuxer() - server.containers = append(server.containers, &docker.Container{ID: "id123"}) - state := docker.State{Running: false, ExitCode: 1} - err := server.MutateContainer("id123", state) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(server.containers[0].State, state) { - t.Errorf("Wrong state after mutation.\nWant %#v.\nGot %#v.", - state, server.containers[0].State) - } -} - -func TestMutateContainerNotFound(t *testing.T) { - server := DockerServer{failures: make(map[string]string)} - server.buildMuxer() - state := docker.State{Running: false, ExitCode: 1} - err := server.MutateContainer("id123", state) - if err == nil { - t.Error("Unexpected error") - } - if err.Error() != "container not found" { - t.Errorf("wrong error message. Want %q. 
Got %q.", "container not found", err) - } -} - -func TestBuildImageWithContentTypeTar(t *testing.T) { - server := DockerServer{imgIDs: make(map[string]string)} - imageName := "teste" - recorder := httptest.NewRecorder() - tarFile, err := os.Open("data/dockerfile.tar") - if err != nil { - t.Fatal(err) - } - defer tarFile.Close() - request, _ := http.NewRequest("POST", "/build?t=teste", tarFile) - request.Header.Add("Content-Type", "application/tar") - server.buildImage(recorder, request) - if recorder.Body.String() == "miss Dockerfile" { - t.Errorf("BuildImage: miss Dockerfile") - return - } - if _, ok := server.imgIDs[imageName]; ok == false { - t.Errorf("BuildImage: image %s not builded", imageName) - } -} - -func TestBuildImageWithRemoteDockerfile(t *testing.T) { - server := DockerServer{imgIDs: make(map[string]string)} - imageName := "teste" - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/build?t=teste&remote=http://localhost/Dockerfile", nil) - server.buildImage(recorder, request) - if _, ok := server.imgIDs[imageName]; ok == false { - t.Errorf("BuildImage: image %s not builded", imageName) - } -} - -func TestPing(t *testing.T) { - server := DockerServer{} - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/_ping", nil) - server.pingDocker(recorder, request) - if recorder.Body.String() != "" { - t.Errorf("Ping: Unexpected body: %s", recorder.Body.String()) - } - if recorder.Code != http.StatusOK { - t.Errorf("Ping: Expected code %d, got: %d", http.StatusOK, recorder.Code) - } -} - -func TestDefaultHandler(t *testing.T) { - server, err := NewServer("127.0.0.1:0", nil, nil) - if err != nil { - t.Fatal(err) - } - defer server.listener.Close() - if server.mux != server.DefaultHandler() { - t.Fatalf("DefaultHandler: Expected to return server.mux, got: %#v", server.DefaultHandler()) - } -} - -func TestCreateExecContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 2) - server.buildMuxer() - recorder := httptest.NewRecorder() - body := `{"Cmd": ["bash", "-c", "ls"]}` - path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - serverExec := server.execs[0] - var got docker.Exec - err := json.NewDecoder(recorder.Body).Decode(&got) - if err != nil { - t.Fatal(err) - } - if got.ID != serverExec.ID { - t.Errorf("CreateExec: wrong value. Want %#v. Got %#v.", serverExec.ID, got.ID) - } - expected := docker.ExecInspect{ - ID: got.ID, - ProcessConfig: docker.ExecProcessConfig{ - EntryPoint: "bash", - Arguments: []string{"-c", "ls"}, - }, - Container: *server.containers[0], - } - if !reflect.DeepEqual(*serverExec, expected) { - t.Errorf("InspectContainer: wrong value. Want:\n%#v\nGot:\n%#v\n", expected, *serverExec) - } -} - -func TestInspectExecContainer(t *testing.T) { - server := DockerServer{} - addContainers(&server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - body := `{"Cmd": ["bash", "-c", "ls"]}` - path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Fatalf("CreateExec: wrong status. Want %d. 
Got %d.", http.StatusOK, recorder.Code) - } - var got docker.Exec - err := json.NewDecoder(recorder.Body).Decode(&got) - if err != nil { - t.Fatal(err) - } - path = fmt.Sprintf("/exec/%s/json", got.ID) - request, _ = http.NewRequest("GET", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - var got2 docker.ExecInspect - err = json.NewDecoder(recorder.Body).Decode(&got2) - if err != nil { - t.Fatal(err) - } - expected := docker.ExecInspect{ - ID: got.ID, - ProcessConfig: docker.ExecProcessConfig{ - EntryPoint: "bash", - Arguments: []string{"-c", "ls"}, - }, - Container: *server.containers[0], - } - got2.Container.State.StartedAt = expected.Container.State.StartedAt - got2.Container.State.FinishedAt = expected.Container.State.FinishedAt - got2.Container.Config = expected.Container.Config - got2.Container.Created = expected.Container.Created - got2.Container.NetworkSettings = expected.Container.NetworkSettings - if !reflect.DeepEqual(got2, expected) { - t.Errorf("InspectContainer: wrong value. Want:\n%#v\nGot:\n%#v\n", expected, got2) - } -} - -func TestStartExecContainer(t *testing.T) { - server, _ := NewServer("127.0.0.1:0", nil, nil) - addContainers(server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - body := `{"Cmd": ["bash", "-c", "ls"]}` - path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - var exec docker.Exec - err := json.NewDecoder(recorder.Body).Decode(&exec) - if err != nil { - t.Fatal(err) - } - unleash := make(chan bool) - server.PrepareExec(exec.ID, func() { - <-unleash - }) - codes := make(chan int, 1) - sent := make(chan bool) - go func() { - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/exec/%s/start", exec.ID) - body := `{"Tty":true}` - request, _ := http.NewRequest("POST", path, strings.NewReader(body)) - close(sent) - server.ServeHTTP(recorder, request) - codes <- recorder.Code - }() - <-sent - execInfo, err := waitExec(server.URL(), exec.ID, true, 5) - if err != nil { - t.Fatal(err) - } - if !execInfo.Running { - t.Error("StartExec: expected exec to be running, but it's not running") - } - close(unleash) - if code := <-codes; code != http.StatusOK { - t.Errorf("StartExec: wrong status. Want %d. Got %d.", http.StatusOK, code) - } - execInfo, err = waitExec(server.URL(), exec.ID, false, 5) - if err != nil { - t.Fatal(err) - } - if execInfo.Running { - t.Error("StartExec: expected exec to be not running after start returns, but it's running") - } -} - -func TestStartExecContainerWildcardCallback(t *testing.T) { - server, _ := NewServer("127.0.0.1:0", nil, nil) - addContainers(server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - body := `{"Cmd": ["bash", "-c", "ls"]}` - path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID) - request, _ := http.NewRequest("POST", path, strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Fatalf("CreateExec: wrong status. Want %d. 
Got %d.", http.StatusOK, recorder.Code) - } - unleash := make(chan bool) - server.PrepareExec("*", func() { - <-unleash - }) - var exec docker.Exec - err := json.NewDecoder(recorder.Body).Decode(&exec) - if err != nil { - t.Fatal(err) - } - codes := make(chan int, 1) - sent := make(chan bool) - go func() { - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/exec/%s/start", exec.ID) - body := `{"Tty":true}` - request, _ := http.NewRequest("POST", path, strings.NewReader(body)) - close(sent) - server.ServeHTTP(recorder, request) - codes <- recorder.Code - }() - <-sent - execInfo, err := waitExec(server.URL(), exec.ID, true, 5) - if err != nil { - t.Fatal(err) - } - if !execInfo.Running { - t.Error("StartExec: expected exec to be running, but it's not running") - } - close(unleash) - if code := <-codes; code != http.StatusOK { - t.Errorf("StartExec: wrong status. Want %d. Got %d.", http.StatusOK, code) - } - execInfo, err = waitExec(server.URL(), exec.ID, false, 5) - if err != nil { - t.Fatal(err) - } - if execInfo.Running { - t.Error("StartExec: expected exec to be not running after start returns, but it's running") - } -} - -func TestStartExecContainerNotFound(t *testing.T) { - server, _ := NewServer("127.0.0.1:0", nil, nil) - addContainers(server, 1) - server.buildMuxer() - recorder := httptest.NewRecorder() - body := `{"Tty":true}` - request, _ := http.NewRequest("POST", "/exec/something-wat/start", strings.NewReader(body)) - server.ServeHTTP(recorder, request) -} - -func waitExec(url, execID string, running bool, maxTry int) (*docker.ExecInspect, error) { - client, err := docker.NewClient(url) - if err != nil { - return nil, err - } - exec, err := client.InspectExec(execID) - for i := 0; i < maxTry && exec.Running != running && err == nil; i++ { - time.Sleep(100e6) - exec, err = client.InspectExec(exec.ID) - } - return exec, err -} - -func TestStatsContainer(t *testing.T) { - server, err := NewServer("127.0.0.1:0", nil, nil) - if err != nil { - t.Fatal(err) - } - defer server.Stop() - addContainers(server, 2) - server.buildMuxer() - expected := docker.Stats{} - expected.CPUStats.CPUUsage.TotalUsage = 20 - server.PrepareStats(server.containers[0].ID, func(id string) docker.Stats { - return expected - }) - recorder := httptest.NewRecorder() - path := fmt.Sprintf("/containers/%s/stats?stream=false", server.containers[0].ID) - request, _ := http.NewRequest("GET", path, nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("StatsContainer: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - body := recorder.Body.Bytes() - var got docker.Stats - err = json.Unmarshal(body, &got) - if err != nil { - t.Fatal(err) - } - got.Read = time.Time{} - if !reflect.DeepEqual(got, expected) { - t.Errorf("StatsContainer: wrong value. Want %#v. 
Got %#v.", expected, got) - } -} - -type safeWriter struct { - sync.Mutex - *httptest.ResponseRecorder -} - -func (w *safeWriter) Write(buf []byte) (int, error) { - w.Lock() - defer w.Unlock() - return w.ResponseRecorder.Write(buf) -} - -func TestStatsContainerStream(t *testing.T) { - server, err := NewServer("127.0.0.1:0", nil, nil) - if err != nil { - t.Fatal(err) - } - defer server.Stop() - addContainers(server, 2) - server.buildMuxer() - expected := docker.Stats{} - expected.CPUStats.CPUUsage.TotalUsage = 20 - server.PrepareStats(server.containers[0].ID, func(id string) docker.Stats { - time.Sleep(50 * time.Millisecond) - return expected - }) - recorder := &safeWriter{ - ResponseRecorder: httptest.NewRecorder(), - } - path := fmt.Sprintf("/containers/%s/stats?stream=true", server.containers[0].ID) - request, _ := http.NewRequest("GET", path, nil) - go func() { - server.ServeHTTP(recorder, request) - }() - time.Sleep(200 * time.Millisecond) - recorder.Lock() - defer recorder.Unlock() - body := recorder.Body.Bytes() - parts := bytes.Split(body, []byte("\n")) - if len(parts) < 2 { - t.Errorf("StatsContainer: wrong number of parts. Want at least 2. Got %#v.", len(parts)) - } - var got docker.Stats - err = json.Unmarshal(parts[0], &got) - if err != nil { - t.Fatal(err) - } - got.Read = time.Time{} - if !reflect.DeepEqual(got, expected) { - t.Errorf("StatsContainer: wrong value. Want %#v. Got %#v.", expected, got) - } -} - -func addNetworks(server *DockerServer, n int) { - server.netMut.Lock() - defer server.netMut.Unlock() - for i := 0; i < n; i++ { - netid := fmt.Sprintf("%x", rand.Int()%10000) - network := docker.Network{ - Name: netid, - ID: fmt.Sprintf("%x", rand.Int()%10000), - Type: "bridge", - Endpoints: []*docker.Endpoint{ - { - Name: "blah", - ID: fmt.Sprintf("%x", rand.Int()%10000), - Network: netid, - }, - }, - } - server.networks = append(server.networks, &network) - } -} - -func TestListNetworks(t *testing.T) { - server := DockerServer{} - addNetworks(&server, 2) - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("GET", "/networks", nil) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusOK { - t.Errorf("ListNetworks: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code) - } - expected := make([]docker.Network, 2) - for i, network := range server.networks { - expected[i] = docker.Network{ - ID: network.ID, - Name: network.Name, - Type: network.Type, - Endpoints: network.Endpoints, - } - } - var got []docker.Network - err := json.NewDecoder(recorder.Body).Decode(&got) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got, expected) { - t.Errorf("ListNetworks. Want %#v. Got %#v.", expected, got) - } -} - -type createNetworkResponse struct { - ID string `json:"ID"` -} - -func TestCreateNetwork(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - netid := fmt.Sprintf("%x", rand.Int()%10000) - netname := fmt.Sprintf("%x", rand.Int()%10000) - body := fmt.Sprintf(`{"ID": "%s", "Name": "%s", "Type": "bridge" }`, netid, netname) - request, _ := http.NewRequest("POST", "/networks", strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusCreated { - t.Errorf("CreateNetwork: wrong status. Want %d. 
Got %d.", http.StatusCreated, recorder.Code) - } - - var returned createNetworkResponse - err := json.NewDecoder(recorder.Body).Decode(&returned) - if err != nil { - t.Fatal(err) - } - stored := server.networks[0] - if returned.ID != stored.ID { - t.Errorf("CreateNetwork: ID mismatch. Stored: %q. Returned: %q.", stored.ID, returned) - } -} - -func TestCreateNetworkInvalidBody(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - recorder := httptest.NewRecorder() - request, _ := http.NewRequest("POST", "/networks", strings.NewReader("whaaaaaat---")) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusBadRequest { - t.Errorf("CreateNetwork: wrong status. Want %d. Got %d.", http.StatusBadRequest, recorder.Code) - } -} - -func TestCreateNetworkDuplicateName(t *testing.T) { - server := DockerServer{} - server.buildMuxer() - addNetworks(&server, 1) - server.networks[0].Name = "mynetwork" - recorder := httptest.NewRecorder() - body := fmt.Sprintf(`{"ID": "%s", "Name": "mynetwork", "Type": "bridge" }`, fmt.Sprintf("%x", rand.Int()%10000)) - request, _ := http.NewRequest("POST", "/networks", strings.NewReader(body)) - server.ServeHTTP(recorder, request) - if recorder.Code != http.StatusForbidden { - t.Errorf("CreateNetwork: wrong status. Want %d. Got %d.", http.StatusForbidden, recorder.Code) - } -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go deleted file mode 100644 index 55f43174b..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2014 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// -// The content is borrowed from Docker's own source code to provide a simple -// tls based dialer - -package docker - -import ( - "crypto/tls" - "errors" - "net" - "strings" - "time" -) - -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if cwc, ok := c.rawConn.(interface { - CloseWrite() error - }); ok { - return cwc.CloseWrite() - } - return nil -} - -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - rawConn, err := dialer.Dial(network, addr) - if err != nil { - return nil, err - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. 
- c := *config - c.ServerName = hostname - config = &c - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. - return &tlsClientCon{conn, rawConn}, nil -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go deleted file mode 100644 index 4e6327254..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "errors" - "net/http" -) - -var ( - // ErrNoSuchVolume is the error returned when the volume does not exist. - ErrNoSuchVolume = errors.New("no such volume") - - // ErrVolumeInUse is the error returned when the volume requested to be removed is still in use. - ErrVolumeInUse = errors.New("volume in use and cannot be removed") -) - -// Volume represents a volume. -// -// See https://goo.gl/FZA4BK for more details. -type Volume struct { - Name string `json:"Name" yaml:"Name"` - Driver string `json:"Driver,omitempty" yaml:"Driver,omitempty"` - Mountpoint string `json:"Mountpoint,omitempty" yaml:"Mountpoint,omitempty"` -} - -// ListVolumesOptions specify parameters to the ListVolumes function. -// -// See https://goo.gl/FZA4BK for more details. -type ListVolumesOptions struct { - Filters map[string][]string -} - -// ListVolumes returns a list of available volumes in the server. -// -// See https://goo.gl/FZA4BK for more details. -func (c *Client) ListVolumes(opts ListVolumesOptions) ([]Volume, error) { - body, _, err := c.do("GET", "/volumes?"+queryString(opts), doOptions{}) - if err != nil { - return nil, err - } - m := make(map[string]interface{}) - if err := json.Unmarshal(body, &m); err != nil { - return nil, err - } - var volumes []Volume - volumesJSON, ok := m["Volumes"] - if !ok { - return volumes, nil - } - data, err := json.Marshal(volumesJSON) - if err != nil { - return nil, err - } - if err := json.Unmarshal(data, &volumes); err != nil { - return nil, err - } - return volumes, nil -} - -// CreateVolumeOptions specify parameters to the CreateVolume function. -// -// See https://goo.gl/pBUbZ9 for more details. -type CreateVolumeOptions struct { - Name string - Driver string - DriverOpts map[string]string -} - -// CreateVolume creates a volume on the server. -// -// See https://goo.gl/pBUbZ9 for more details. -func (c *Client) CreateVolume(opts CreateVolumeOptions) (*Volume, error) { - body, _, err := c.do("POST", "/volumes", doOptions{data: opts}) - if err != nil { - return nil, err - } - var volume Volume - if err := json.Unmarshal(body, &volume); err != nil { - return nil, err - } - return &volume, nil -} - -// InspectVolume returns a volume by its name. -// -// See https://goo.gl/0g9A6i for more details. 
-func (c *Client) InspectVolume(name string) (*Volume, error) { - body, status, err := c.do("GET", "/volumes/"+name, doOptions{}) - if status == http.StatusNotFound { - return nil, ErrNoSuchVolume - } - if err != nil { - return nil, err - } - var volume Volume - if err := json.Unmarshal(body, &volume); err != nil { - return nil, err - } - return &volume, nil -} - -// RemoveVolume removes a volume by its name. -// -// See https://goo.gl/79GNQz for more details. -func (c *Client) RemoveVolume(name string) error { - _, status, err := c.do("DELETE", "/volumes/"+name, doOptions{}) - if status == http.StatusNotFound { - return ErrNoSuchVolume - } - if status == http.StatusConflict { - return ErrVolumeInUse - } - return err -} diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume_test.go deleted file mode 100644 index 9707c09cd..000000000 --- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/volume_test.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 go-dockerclient authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package docker - -import ( - "encoding/json" - "net/http" - "net/url" - "reflect" - "testing" -) - -func TestListVolumes(t *testing.T) { - volumesData := `[ - { - "Name": "tardis", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/tardis" - }, - { - "Name": "foo", - "Driver": "bar", - "Mountpoint": "/var/lib/docker/volumes/bar" - } -]` - body := `{ "Volumes": ` + volumesData + ` }` - var expected []Volume - if err := json.Unmarshal([]byte(volumesData), &expected); err != nil { - t.Fatal(err) - } - client := newTestClient(&FakeRoundTripper{message: body, status: http.StatusOK}) - volumes, err := client.ListVolumes(ListVolumesOptions{}) - if err != nil { - t.Error(err) - } - if !reflect.DeepEqual(volumes, expected) { - t.Errorf("ListVolumes: Wrong return value. Want %#v. Got %#v.", expected, volumes) - } -} - -func TestCreateVolume(t *testing.T) { - body := `{ - "Name": "tardis", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/tardis" - }` - var expected Volume - if err := json.Unmarshal([]byte(body), &expected); err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(fakeRT) - volume, err := client.CreateVolume( - CreateVolumeOptions{ - Name: "tardis", - Driver: "local", - DriverOpts: map[string]string{ - "foo": "bar", - }, - }, - ) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(volume, &expected) { - t.Errorf("CreateVolume: Wrong return value. Want %#v. Got %#v.", expected, volume) - } - req := fakeRT.requests[0] - expectedMethod := "POST" - if req.Method != expectedMethod { - t.Errorf("CreateVolume(): Wrong HTTP method. Want %s. Got %s.", expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/volumes")) - if req.URL.Path != u.Path { - t.Errorf("CreateVolume(): Wrong request path. Want %q. 
Got %q.", u.Path, req.URL.Path) - } -} - -func TestInspectVolume(t *testing.T) { - body := `{ - "Name": "tardis", - "Driver": "local", - "Mountpoint": "/var/lib/docker/volumes/tardis" - }` - var expected Volume - if err := json.Unmarshal([]byte(body), &expected); err != nil { - t.Fatal(err) - } - fakeRT := &FakeRoundTripper{message: body, status: http.StatusOK} - client := newTestClient(fakeRT) - name := "tardis" - volume, err := client.InspectVolume(name) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(volume, &expected) { - t.Errorf("InspectVolume: Wrong return value. Want %#v. Got %#v.", expected, volume) - } - req := fakeRT.requests[0] - expectedMethod := "GET" - if req.Method != expectedMethod { - t.Errorf("InspectVolume(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/volumes/" + name)) - if req.URL.Path != u.Path { - t.Errorf("CreateVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } -} - -func TestRemoveVolume(t *testing.T) { - name := "test" - fakeRT := &FakeRoundTripper{message: "", status: http.StatusNoContent} - client := newTestClient(fakeRT) - if err := client.RemoveVolume(name); err != nil { - t.Fatal(err) - } - req := fakeRT.requests[0] - expectedMethod := "DELETE" - if req.Method != expectedMethod { - t.Errorf("RemoveVolume(%q): Wrong HTTP method. Want %s. Got %s.", name, expectedMethod, req.Method) - } - u, _ := url.Parse(client.getURL("/volumes/" + name)) - if req.URL.Path != u.Path { - t.Errorf("RemoveVolume(%q): Wrong request path. Want %q. Got %q.", name, u.Path, req.URL.Path) - } -} - -func TestRemoveVolumeNotFound(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "no such volume", status: http.StatusNotFound}) - if err := client.RemoveVolume("test:"); err != ErrNoSuchVolume { - t.Errorf("RemoveVolume: wrong error. Want %#v. Got %#v.", ErrNoSuchVolume, err) - } -} - -func TestRemoveVolumeInUse(t *testing.T) { - client := newTestClient(&FakeRoundTripper{message: "volume in use and cannot be removed", status: http.StatusConflict}) - if err := client.RemoveVolume("test:"); err != ErrVolumeInUse { - t.Errorf("RemoveVolume: wrong error. Want %#v. 
Got %#v.", ErrVolumeInUse, err) - } -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/.gitignore b/Godeps/_workspace/src/github.com/gambol99/go-marathon/.gitignore deleted file mode 100644 index 3000820de..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/.gitignore +++ /dev/null @@ -1,38 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so -go-marathon.iml -.idea/ -Gemfile.lock -thin.* -examples/applications/applications -examples/multiple_endpoints/multiple_endpoints -examples/subscription/subscription -examples/groups/groups - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - - - -examples/events/events -tests/rest-api/rest-api -examples/tasks/tasks \ No newline at end of file diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/.travis.yml b/Godeps/_workspace/src/github.com/gambol99/go-marathon/.travis.yml deleted file mode 100644 index 886987c6c..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -# -# Author: Rohith (gambol99@gmail.com) -# Date: 2015-02-17 17:11:18 +0000 (Tue, 17 Feb 2015) -# -# vim:ts=2:sw=2:et -# -language: go -go: - - 1.3.3 - - 1.4 -install: - - go get - - make test diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/AUTHORS b/Godeps/_workspace/src/github.com/gambol99/go-marathon/AUTHORS deleted file mode 100644 index 17460fd06..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/AUTHORS +++ /dev/null @@ -1,11 +0,0 @@ -atheatos -Harpreet Sawhney -Ian Babrou -Israel Derdik -Luke Amdor -Nic Grayson -Raffaele Di Fazio -Raffaele Di Fazio -Rohith -Yang Bai -Zhanpeng Chen diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/LICENSE b/Godeps/_workspace/src/github.com/gambol99/go-marathon/LICENSE deleted file mode 100644 index 5e0fd33cb..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, -and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by -the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all -other entities that control, are controlled by, or are under common -control with that entity. For the purposes of this definition, -"control" means (i) the power, direct or indirect, to cause the -direction or management of such entity, whether by contract or -otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity -exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, -including but not limited to software source code, documentation -source, and configuration files. - -"Object" form shall mean any form resulting from mechanical -transformation or translation of a Source form, including but -not limited to compiled object code, generated documentation, -and conversions to other media types. 
- -"Work" shall mean the work of authorship, whether in Source or -Object form, made available under the License, as indicated by a -copyright notice that is included in or attached to the work -(an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object -form, that is based on (or derived from) the Work and for which the -editorial revisions, annotations, elaborations, or other modifications -represent, as a whole, an original work of authorship. For the purposes -of this License, Derivative Works shall not include works that remain -separable from, or merely link (or bind by name) to the interfaces of, -the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including -the original version of the Work and any modifications or additions -to that Work or Derivative Works thereof, that is intentionally -submitted to Licensor for inclusion in the Work by the copyright owner -or by an individual or Legal Entity authorized to submit on behalf of -the copyright owner. For the purposes of this definition, "submitted" -means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, -and issue tracking systems that are managed by, or on behalf of, the -Licensor for the purpose of discussing and improving the Work, but -excluding communication that is conspicuously marked or otherwise -designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity -on behalf of whom a Contribution has been received by Licensor and -subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of -this License, each Contributor hereby grants to You a perpetual, -worldwide, non-exclusive, no-charge, royalty-free, irrevocable -copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the -Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of -this License, each Contributor hereby grants to You a perpetual, -worldwide, non-exclusive, no-charge, royalty-free, irrevocable -(except as stated in this section) patent license to make, have made, -use, offer to sell, sell, import, and otherwise transfer the Work, -where such license applies only to those patent claims licensable -by such Contributor that are necessarily infringed by their -Contribution(s) alone or by combination of their Contribution(s) -with the Work to which such Contribution(s) was submitted. If You -institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work -or a Contribution incorporated within the Work constitutes direct -or contributory patent infringement, then any patent licenses -granted to You under this License for that Work shall terminate -as of the date such litigation is filed. - -4. Redistribution. 
You may reproduce and distribute copies of the -Work or Derivative Works thereof in any medium, with or without -modifications, and in Source or Object form, provided that You -meet the following conditions: - -(a) You must give any other recipients of the Work or -Derivative Works a copy of this License; and - -(b) You must cause any modified files to carry prominent notices -stating that You changed the files; and - -(c) You must retain, in the Source form of any Derivative Works -that You distribute, all copyright, patent, trademark, and -attribution notices from the Source form of the Work, -excluding those notices that do not pertain to any part of -the Derivative Works; and - -(d) If the Work includes a "NOTICE" text file as part of its -distribution, then any Derivative Works that You distribute must -include a readable copy of the attribution notices contained -within such NOTICE file, excluding those notices that do not -pertain to any part of the Derivative Works, in at least one -of the following places: within a NOTICE text file distributed -as part of the Derivative Works; within the Source form or -documentation, if provided along with the Derivative Works; or, -within a display generated by the Derivative Works, if and -wherever such third-party notices normally appear. The contents -of the NOTICE file are for informational purposes only and -do not modify the License. You may add Your own attribution -notices within Derivative Works that You distribute, alongside -or as an addendum to the NOTICE text from the Work, provided -that such additional attribution notices cannot be construed -as modifying the License. - -You may add Your own copyright statement to Your modifications and -may provide additional or different license terms and conditions -for use, reproduction, or distribution of Your modifications, or -for any such Derivative Works as a whole, provided Your use, -reproduction, and distribution of the Work otherwise complies with -the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, -any Contribution intentionally submitted for inclusion in the Work -by You to the Licensor shall be under the terms and conditions of -this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify -the terms of any separate license agreement you may have executed -with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade -names, trademarks, service marks, or product names of the Licensor, -except as required for reasonable and customary use in describing the -origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. Unless required by applicable law or -agreed to in writing, Licensor provides the Work (and each -Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or -implied, including, without limitation, any warranties or conditions -of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A -PARTICULAR PURPOSE. You are solely responsible for determining the -appropriateness of using or redistributing the Work and assume any -risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. 
In no event and under no legal theory, -whether in tort (including negligence), contract, or otherwise, -unless required by applicable law (such as deliberate and grossly -negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, -incidental, or consequential damages of any character arising as a -result of this License or out of the use or inability to use the -Work (including but not limited to damages for loss of goodwill, -work stoppage, computer failure or malfunction, or any and all -other commercial damages or losses), even if such Contributor -has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing -the Work or Derivative Works thereof, You may choose to offer, -and charge a fee for, acceptance of support, warranty, indemnity, -or other liability obligations and/or rights consistent with this -License. However, in accepting such obligations, You may act only -on Your own behalf and on Your sole responsibility, not on behalf -of any other Contributor, and only if You agree to indemnify, -defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason -of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - -To apply the Apache License to your work, attach the following -boilerplate notice, with the fields enclosed by brackets "{}" -replaced with your own identifying information. (Don't include -the brackets!) The text should be enclosed in the appropriate -comment syntax for the file format. We also recommend that a -file or class name and description of purpose be included on the -same "printed page" as the copyright notice for easier -identification within third-party archives. - -Copyright {yyyy} {name of copyright owner} - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
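Editor's note: the appendix above says the notice should be wrapped in the comment syntax of the target file format. As an illustrative sketch only, the same boilerplate in Go comment syntax (the form the vendored go-marathon sources removed below also use) would look like this; the bracketed fields stay as placeholders and the package name is hypothetical:

```Go
// Copyright {yyyy} {name of copyright owner}
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mypackage // hypothetical package name; place the notice at the top of each source file
```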
diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/Makefile b/Godeps/_workspace/src/github.com/gambol99/go-marathon/Makefile deleted file mode 100644 index 45c8fa2b4..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/Makefile +++ /dev/null @@ -1,27 +0,0 @@ -# -# Author: Rohith (gambol99@gmail.com) -# Date: 2015-02-10 15:35:14 +0000 (Tue, 10 Feb 2015) -# -# vim:ts=2:sw=2:et -# -NAME="go-marathon" -AUTHOR=gambol99 -HARDWARE=$(shell uname -m) -VERSION=$(shell awk '/const Version/ { print $$4 }' version.go | sed 's/"//g') - -.PHONY: test examples authors changelog - -build: - go build - -authors: - git log --format='%aN <%aE>' | sort -u > AUTHORS - -test: - go get github.com/stretchr/testify/assert - go get gopkg.in/yaml.v2 - go test -v - -changelog: release - git log $(shell git tag | tail -n1)..HEAD --no-merges --format=%B > changelog - diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/README.md b/Godeps/_workspace/src/github.com/gambol99/go-marathon/README.md deleted file mode 100644 index db9cd13f4..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/README.md +++ /dev/null @@ -1,160 +0,0 @@ -[![Build Status](https://travis-ci.org/gambol99/go-marathon.svg?branch=master)](https://travis-ci.org/gambol99/go-marathon) -[![GoDoc](http://godoc.org/github.com/gambol99/go-marathon?status.png)](http://godoc.org/github.com/gambol99/go-marathon) - -#### **Go-Marathon** ------ - -Go-marathon is an API library for working with [Marathon](https://mesosphere.github.io/marathon/). It currently supports - - > - Application and group deployment - > - Helper filters for pulling the status, configuration and tasks - > - Multiple Endpoint support for HA deployments - > - Marathon Subscriptions and Event callbacks - - Note: the library is still under active development; requires >= Go 1.3 - -#### **Code Examples** - ------- - -There is also an examples directory in the source, with hints and snippets of code showing how to use the library - it is probably the best place to start. - -**Creating a client** - -```Go -import ( - "os" - - marathon "github.com/gambol99/go-marathon" - "github.com/golang/glog" -) - -marathon_url := "http://10.241.1.71:8080" - config := marathon.NewDefaultConfig() - config.URL = marathon_url - config.LogOutput = os.Stdout - if client, err := marathon.NewClient(config); err != nil { - glog.Fatalf("Failed to create a client for marathon, error: %s", err) - } else { - applications, err := client.Applications(nil) - ... - ``` - -> Note, you can also specify multiple endpoints for Marathon (i.e. you have set up Marathon in HA mode and have multiple instances running) - -```Go -marathon := "http://10.241.1.71:8080,10.241.1.72:8080,10.241.1.73:8080" -``` - -The first endpoint specified will be used; if it goes offline, the member is marked as *"unavailable"* and a background process will continue to ping the member until it's back online.
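Editor's note: a minimal, self-contained sketch of the HA setup described above, using only identifiers that appear elsewhere in this patch (`NewDefaultConfig`, `Config.URL`, `Config.LogOutput`, `NewClient`, `Ping`); the endpoint addresses are the illustrative ones from this README:

```Go
package main

import (
	"log"
	"os"

	marathon "github.com/gambol99/go-marathon"
)

func main() {
	// point the client at every Marathon member; the comma-separated list is
	// split into cluster members and failed over when one becomes unavailable
	config := marathon.NewDefaultConfig()
	config.URL = "http://10.241.1.71:8080,10.241.1.72:8080,10.241.1.73:8080"
	config.LogOutput = os.Stdout

	client, err := marathon.NewClient(config)
	if err != nil {
		log.Fatalf("failed to create a marathon client: %s", err)
	}

	// any API call now goes to the first available member
	if ok, err := client.Ping(); err != nil || !ok {
		log.Fatalf("marathon cluster is not reachable: %v", err)
	}
}
```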
- -**Listing the applications** - -```Go -if applications, err := client.Applications(nil); err != nil { - glog.Errorf("Failed to list applications") -} else { - glog.Infof("Found %d applications running", len(applications.Apps)) - for _, application := range applications.Apps { - glog.Infof("Application: %s", application) - details, err := client.Application(application.ID) - Assert(err) - if details.Tasks != nil && len(details.Tasks) > 0 { - for _, task := range details.Tasks { - glog.Infof("task: %s", task) - } - // check the health of the application - health, err := client.ApplicationOK(details.ID) - glog.Infof("Application: %s, healthy: %t", details.ID, health) - } - } -} -``` - - **Creating a new application** - -```Go -glog.Infof("Deploying a new application") -application := marathon.NewDockerApplication() -application.Name("/product/name/frontend") -application.CPU(0.1).Memory(64).Storage(0.0).Count(2) -application.Arg("/usr/sbin/apache2ctl").Arg("-D").Arg("FOREGROUND") -application.AddEnv("NAME", "frontend_http") -application.AddEnv("SERVICE_80_NAME", "test_http") -// add the docker container -application.Container.Docker.Container("quay.io/gambol99/apache-php:latest").Expose(80).Expose(443) -application.CheckHTTP("/health", 10, 5) - -if _, err := client.CreateApplication(application, true); err != nil { - glog.Errorf("Failed to create application: %s, error: %s", application, err) -} else { - glog.Infof("Created the application: %s", application) -} -``` - -**Scale Application** - -Change the number of instances of the application to 4 - -```Go -glog.Infof("Scale to 4 instances") -if _, err := client.ScaleApplicationInstances(application.ID, 4); err != nil { - glog.Errorf("Failed to scale the application: %s, error: %s", application, err) -} else { - client.WaitOnApplication(application.ID, 0) - glog.Infof("Successfully scaled the application") -} -``` - -**Subscription & Events** - -Request to listen to events related to applications - namely status updates, health check changes and failures - -```Go -/* step: let's register for events */ -update := make(marathon.EventsChannel, 5) -if err := client.AddEventsListener(update, marathon.EVENTS_APPLICATIONS); err != nil { - glog.Fatalf("Failed to register for subscriptions, %s", err) -} else { - for { - event := <-update - glog.Infof("EVENT: %s", event) - } -} - -// A full list of the events - -const ( - EVENT_API_REQUEST = 1 << iota - EVENT_STATUS_UPDATE - EVENT_FRAMEWORK_MESSAGE - EVENT_SUBSCRIPTION - EVENT_UNSUBSCRIBED - EVENT_ADD_HEALTH_CHECK - EVENT_REMOVE_HEALTH_CHECK - EVENT_FAILED_HEALTH_CHECK - EVENT_CHANGED_HEALTH_CHECK - EVENT_GROUP_CHANGE_SUCCESS - EVENT_GROUP_CHANGE_FAILED - EVENT_DEPLOYMENT_SUCCESS - EVENT_DEPLOYMENT_FAILED - EVENT_DEPLOYMENT_INFO - EVENT_DEPLOYMENT_STEP_SUCCESS - EVENT_DEPLOYMENT_STEP_FAILED -) - -const ( - EVENTS_APPLICATIONS = EVENT_STATUS_UPDATE | EVENT_CHANGED_HEALTH_CHECK | EVENT_FAILED_HEALTH_CHECK - EVENTS_SUBSCRIPTIONS = EVENT_SUBSCRIPTION | EVENT_UNSUBSCRIBED -) -``` - ----- - -#### **Contributing** - - - Fork it - - Create your feature branch (git checkout -b my-new-feature) - - Commit your changes (git commit -am 'Add some feature') - - Push to the branch (git push origin my-new-feature) - - Create a new Pull Request - - If applicable, update the README.md diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/application.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/application.go deleted file mode 100644 index e745f83cd..000000000 --- 
a/Godeps/_workspace/src/github.com/gambol99/go-marathon/application.go +++ /dev/null @@ -1,498 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import ( - "errors" - "fmt" - "net/url" - "time" -) - -var ( - ErrApplicationExists = errors.New("The application already exists in marathon, you must update") - /* no container has been specified yet */ - ErrNoApplicationContainer = errors.New("You have not specified a docker container yet") -) - -type Applications struct { - Apps []Application `json:"apps"` -} - -type ApplicationWrap struct { - Application Application `json:"app"` -} - -type Application struct { - ID string `json:"id",omitempty` - Cmd string `json:"cmd,omitempty"` - Args []string `json:"args,omitempty"` - Constraints [][]string `json:"constraints,omitempty"` - Container *Container `json:"container,omitempty"` - CPUs float64 `json:"cpus,omitempty"` - Disk float64 `json:"disk,omitempty"` - Env map[string]string `json:"env,omitempty"` - Executor string `json:"executor,omitempty"` - HealthChecks []*HealthCheck `json:"healthChecks,omitempty"` - Instances int `json:"instances,omitempty"` - Mem float64 `json:"mem,omitempty"` - Tasks []*Task `json:"tasks,omitempty"` - Ports []int `json:"ports"` - RequirePorts bool `json:"requirePorts,omitempty"` - BackoffSeconds float64 `json:"backoffSeconds,omitempty"` - BackoffFactor float64 `json:"backoffFactor,omitempty"` - MaxLaunchDelaySeconds float64 `json:"maxLaunchDelaySeconds,omitempty"` - DeploymentID []map[string]string `json:"deployments,omitempty"` - Dependencies []string `json:"dependencies,omitempty"` - TasksRunning int `json:"tasksRunning,omitempty"` - TasksStaged int `json:"tasksStaged,omitempty"` - User string `json:"user,omitempty"` - UpgradeStrategy *UpgradeStrategy `json:"upgradeStrategy,omitempty"` - Uris []string `json:"uris,omitempty"` - Version string `json:"version,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - AcceptedResourceRoles []string `json:"acceptedResourceRoles,omitempty"` - LastTaskFailure *LastTaskFailure `json:"lastTaskFailure,omitempty"` -} - -type ApplicationVersions struct { - Versions []string `json:"versions"` -} - -type ApplicationVersion struct { - Version string `json:"version"` -} - -func NewDockerApplication() *Application { - application := new(Application) - application.Container = NewDockerContainer() - return application -} - -// The name of the application i.e. 
the identifier for this application -func (application *Application) Name(id string) *Application { - application.ID = validateID(id) - return application -} - -// The amount of CPU shares per instance which is assigned to the application -// cpu: the CPU shared (check Docker docs) per instance -func (application *Application) CPU(cpu float64) *Application { - application.CPUs = cpu - return application -} - -// The amount of disk space the application is assigned, which for docker -// application I don't believe is relevant -// disk: the disk space in MB -func (application *Application) Storage(disk float64) *Application { - application.Disk = disk - return application -} - -// Check to see if all the application tasks are running, i.e. the instances is equal -// to the number of running tasks -func (application *Application) AllTaskRunning() bool { - if application.Instances == 0 { - return true - } - if application.Tasks == nil { - return false - } - if application.TasksRunning == application.Instances { - return true - } - return false -} - -// Adds a dependency for this application. Note, if you want to wait for an application -// dependency to actually be UP, i.e. not just deployed, you need a health check on the -// dependant app. -// name: the application id which this application depends on -func (application *Application) DependsOn(name string) *Application { - if application.Dependencies == nil { - application.Dependencies = make([]string, 0) - } - application.Dependencies = append(application.Dependencies, name) - return application - -} - -// The amount of memory the application can consume per instance -// memory: the amount of MB to assign -func (application *Application) Memory(memory float64) *Application { - application.Mem = memory - return application -} - -// Set the number of instances of the application to run -// count: the number of instances to run -func (application *Application) Count(count int) *Application { - application.Instances = count - return application -} - -// Add an argument to the applications -// argument: the argument you are adding -func (application *Application) Arg(argument string) *Application { - if application.Args == nil { - application.Args = make([]string, 0) - } - application.Args = append(application.Args, argument) - return application -} - -// Add an environment variable to the application -// name: the name of the variable -// value: go figure, the value associated to the above -func (application *Application) AddEnv(name, value string) *Application { - if application.Env == nil { - application.Env = make(map[string]string, 0) - } - application.Env[name] = value - return application -} - -// More of a helper method, used to check if an application has healtchecks -func (application *Application) HasHealthChecks() bool { - if application.HealthChecks != nil && len(application.HealthChecks) > 0 { - return true - } - return false -} - -// Retrieve the application deployments ID -func (application *Application) Deployments() []*DeploymentID { - deployments := make([]*DeploymentID, 0) - if application.DeploymentID == nil || len(application.DeploymentID) <= 0 { - return deployments - } - // step: extract the deployment id from the result - for _, deploy := range application.DeploymentID { - if id, found := deploy["id"]; found { - deployment := &DeploymentID{ - Version: application.Version, - DeploymentID: id, - } - deployments = append(deployments, deployment) - } - } - return deployments -} - -// Add an HTTP check to an application -// 
port: the port the check should be checking -// interval: the interval in seconds the check should be performed -func (application *Application) CheckHTTP(uri string, port, interval int) (*Application, error) { - if application.HealthChecks == nil { - application.HealthChecks = make([]*HealthCheck, 0) - } - if application.Container == nil || application.Container.Docker == nil { - return nil, ErrNoApplicationContainer - } - /* step: get the port index */ - if port_index, err := application.Container.Docker.ServicePortIndex(port); err != nil { - return nil, err - } else { - health := NewDefaultHealthCheck() - health.Path = uri - health.IntervalSeconds = interval - health.PortIndex = port_index - /* step: add to the checks */ - application.HealthChecks = append(application.HealthChecks, health) - return application, nil - } -} - -// Add a TCP check to an application; note the port mapping must already exist, or an -// error will thrown -// port: the port the check should, err, check -// interval: the interval in seconds the check should be performed -func (application *Application) CheckTCP(port, interval int) (*Application, error) { - if application.HealthChecks == nil { - application.HealthChecks = make([]*HealthCheck, 0) - } - if application.Container == nil || application.Container.Docker == nil { - return nil, ErrNoApplicationContainer - } - /* step: get the port index */ - if port_index, err := application.Container.Docker.ServicePortIndex(port); err != nil { - return nil, err - } else { - health := NewDefaultHealthCheck() - health.Protocol = "TCP" - health.IntervalSeconds = interval - health.PortIndex = port_index - /* step: add to the checks */ - application.HealthChecks = append(application.HealthChecks, health) - return application, nil - } -} - -// Retrieve an array of all the applications which are running in marathon -func (client *Client) Applications(v url.Values) (*Applications, error) { - applications := new(Applications) - if err := client.apiGet(MARATHON_API_APPS+"?"+v.Encode(), nil, applications); err != nil { - return nil, err - } - return applications, nil -} - -// Retrieve an array of the application names currently running in marathon -func (client *Client) ListApplications(v url.Values) ([]string, error) { - if applications, err := client.Applications(v); err != nil { - return nil, err - } else { - list := make([]string, 0) - for _, application := range applications.Apps { - list = append(list, application.ID) - } - return list, nil - } -} - -// Checks to see if the application version exists in Marathon -// name: the id used to identify the application -// version: the version (normally a timestamp) your looking for -func (client *Client) HasApplicationVersion(name, version string) (bool, error) { - id := trimRootPath(name) - if versions, err := client.ApplicationVersions(id); err != nil { - return false, err - } else { - if contains(versions.Versions, version) { - return true, nil - } - return false, nil - } -} - -// A list of versions which has been deployed with marathon for a specific application -// name: the id used to identify the application -func (client *Client) ApplicationVersions(name string) (*ApplicationVersions, error) { - uri := fmt.Sprintf("%s/%s/versions", MARATHON_API_APPS, trimRootPath(name)) - versions := new(ApplicationVersions) - if err := client.apiGet(uri, nil, versions); err != nil { - return nil, err - } - return versions, nil -} - -// Change / Revert the version of the application -// name: the id used to identify the application -// 
version: the version (normally a timestamp) you wish to change to -func (client *Client) SetApplicationVersion(name string, version *ApplicationVersion) (*DeploymentID, error) { - client.log("SetApplicationVersion() setting the application: %s to version: %s", name, version) - uri := fmt.Sprintf("%s/%s", MARATHON_API_APPS, trimRootPath(name)) - deploymentId := new(DeploymentID) - if err := client.apiPut(uri, version, deploymentId); err != nil { - client.log("SetApplicationVersion() Failed to change the application to version: %s, error: %s", version.Version, err) - return nil, err - } - return deploymentId, nil -} - -// Retrieve the application configuration from marathon -// name: the id used to identify the application -func (client *Client) Application(name string) (*Application, error) { - application := new(ApplicationWrap) - if err := client.apiGet(fmt.Sprintf("%s/%s", MARATHON_API_APPS, trimRootPath(name)), nil, application); err != nil { - return nil, err - } - return &application.Application, nil -} - -// Validates that the application, or more appropriately it's tasks have passed all the health checks. -// If no health checks exist, we simply return true -// name: the id used to identify the application -func (client *Client) ApplicationOK(name string) (bool, error) { - /* step: check the application even exists */ - if found, err := client.HasApplication(name); err != nil { - return false, err - } else if !found { - return false, ErrDoesNotExist - } - /* step: get the application */ - if application, err := client.Application(name); err != nil { - return false, err - } else { - /* step: if the application has not health checks, just return true */ - if application.HealthChecks == nil || len(application.HealthChecks) <= 0 { - return true, nil - } - /* step: does the application have any tasks */ - if application.Tasks == nil || len(application.Tasks) <= 0 { - return true, nil - } - /* step: iterate the application checks and look for false */ - for _, task := range application.Tasks { - if task.HealthCheckResult != nil { - for _, check := range task.HealthCheckResult { - //When a task is flapping in Marathon, this is sometimes nil - if check == nil { - return false, nil - } - if !check.Alive { - return false, nil - } - } - } - } - return true, nil - } -} - -// Retrieve an array of Deployment IDs for an application -// name: the id used to identify the application -func (client *Client) ApplicationDeployments(name string) ([]*DeploymentID, error) { - if application, err := client.Application(name); err != nil { - return nil, err - } else { - return application.Deployments(), nil - } -} - -// Creates a new application in Marathon -// application: the structure holding the application configuration -// wait_on_running: waits on the application deploying, i.e. the instances arre all running (note health checks are excluded) -func (client *Client) CreateApplication(application *Application, wait_on_running bool) (*Application, error) { - result := new(Application) - client.log("Creating an application: %s", application) - if err := client.apiPost(MARATHON_API_APPS, &application, result); err != nil { - return nil, err - } - // step: are we waiting for the application to start? 
- if wait_on_running { - return nil, client.WaitOnApplication(application.ID, 0) - } - return result, nil -} - -// Wait for an application to be deployed -// name: the id of the application -// timeout: a duration of time to wait for an application to deploy -func (client *Client) WaitOnApplication(name string, timeout time.Duration) error { - if timeout <= 0 { - timeout = client.config.DefaultDeploymentTimeout - } - // step: this is very naive approach - the problem with using deployment id's is - // one) from > 0.8.0 you can be handed a deployment Id on creation, but it may or may not exist in /v2/deployments - // two) there is NO WAY of checking if a deployment Id was successful (i.e. no history). So i poll /deployments - // as it's not there, was it successful? has it not been scheduled yet? should i wait for a second to see if the - // deployment starts? or have i missed it? ... - err := deadline(timeout, func(stop_channel chan bool) error { - var flick AtomicSwitch - go func() { - <-stop_channel - close(stop_channel) - flick.SwitchOn() - }() - for !flick.IsSwitched() { - if found, err := client.HasApplication(name); err != nil { - continue - } else if found { - if app, err := client.Application(name); err == nil && app.AllTaskRunning() { - return nil - } - } - time.Sleep(time.Duration(500) * time.Millisecond) - } - return nil - }) - return err -} - -// Checks to see if the application exists in marathon -// name: the id used to identify the application -func (client *Client) HasApplication(name string) (bool, error) { - client.log("HasApplication() Checking if application: %s exists in marathon", name) - if name == "" { - return false, ErrInvalidArgument - } - if applications, err := client.ListApplications(nil); err != nil { - return false, err - } else { - for _, id := range applications { - if name == id { - client.log("HasApplication() The application: %s presently exist in maration", name) - return true, nil - } - } - return false, nil - } -} - -// Deletes an application from marathon -// name: the id used to identify the application -func (client *Client) DeleteApplication(name string) (*DeploymentID, error) { - /* step: check of the application already exists */ - client.log("DeleteApplication() Deleting the application: %s", name) - deployID := new(DeploymentID) - if err := client.apiDelete(fmt.Sprintf("%s/%s", MARATHON_API_APPS, trimRootPath(name)), nil, deployID); err != nil { - return nil, err - } - return deployID, nil -} - -// Performs a rolling restart of marathon application -// name: the id used to identify the application -func (client *Client) RestartApplication(name string, force bool) (*DeploymentID, error) { - client.log("RestartApplication() Restarting the application: %s, force: %s", name, force) - deployment := new(DeploymentID) - var options struct { - Force bool `json:"force"` - } - options.Force = force - if err := client.apiGet(fmt.Sprintf("%s/%s/restart", MARATHON_API_APPS, trimRootPath(name)), &options, deployment); err != nil { - return nil, err - } - return deployment, nil -} - -// Change the number of instance an application is running -// name: the id used to identify the application -// instances: the number of instances you wish to change to -func (client *Client) ScaleApplicationInstances(name string, instances int) (*DeploymentID, error) { - client.log("ScaleApplicationInstances(): application: %s, instance: %d", name, instances) - changes := new(Application) - changes.ID = validateID(name) - changes.Instances = instances - uri := 
fmt.Sprintf("%s/%s", MARATHON_API_APPS, trimRootPath(name)) - deployID := new(DeploymentID) - if err := client.apiPut(uri, &changes, deployID); err != nil { - return nil, err - } - return deployID, nil -} - -// Updates a new application in Marathon -// application: the structure holding the application configuration -// wait_on_running: waits on the application deploying, i.e. the instances arre all running (note health checks are excluded) -func (client *Client) UpdateApplication(application *Application, wait_on_running bool) (*Application, error) { - result := new(Application) - client.log("Updating application: %s", application) - - uri := fmt.Sprintf("%s/%s", MARATHON_API_APPS, trimRootPath(application.ID)) - - if err := client.apiPut(uri, &application, result); err != nil { - return nil, err - } - // step: are we waiting for the application to start? - if wait_on_running { - return nil, client.WaitOnApplication(application.ID, 0) - } - return result, nil -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/application_test.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/application_test.go deleted file mode 100644 index 3398b36e6..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/application_test.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package marathon - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestCreateApplication(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - application := NewDockerApplication() - application.ID = "/fake_app" - app, err := client.CreateApplication(application, false) - assert.Nil(t, err) - assert.NotNil(t, app) - assert.Equal(t, application.ID, "/fake_app") - assert.Equal(t, app.DeploymentID[0]["id"], "f44fd4fc-4330-4600-a68b-99c7bd33014a") -} - -func TestApplications(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - applications, err := client.Applications(nil) - assert.Nil(t, err) - assert.NotNil(t, applications) - assert.Equal(t, len(applications.Apps), 2) -} - -func TestListApplications(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - applications, err := client.ListApplications(nil) - assert.Nil(t, err) - assert.NotNil(t, applications) - assert.Equal(t, len(applications), 2) - assert.Equal(t, applications[0], FAKE_APP_NAME) - assert.Equal(t, applications[1], FAKE_APP_NAME_BROKEN) -} - -func TestApplicationVersions(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - versions, err := client.ApplicationVersions(FAKE_APP_NAME) - assert.Nil(t, err) - assert.NotNil(t, versions) - assert.NotNil(t, versions.Versions) - assert.Equal(t, len(versions.Versions), 1) - assert.Equal(t, versions.Versions[0], "2014-04-04T06:25:31.399Z") - /* check we get an error on app not there */ - versions, err = client.ApplicationVersions("/not/there") - assert.NotNil(t, err) -} - -func TestSetApplicationVersion(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - deployment, err := client.SetApplicationVersion(FAKE_APP_NAME, &ApplicationVersion{Version: "2014-08-26T07:37:50.462Z"}) - assert.Nil(t, err) - assert.NotNil(t, deployment) - assert.NotNil(t, deployment.Version) - assert.NotNil(t, deployment.DeploymentID) - assert.Equal(t, deployment.Version, "2014-08-26T07:37:50.462Z") - assert.Equal(t, deployment.DeploymentID, "83b215a6-4e26-4e44-9333-5c385eda6438") - - _, err = client.SetApplicationVersion("/not/there", &ApplicationVersion{Version: "2014-04-04T06:25:31.399Z"}) - assert.NotNil(t, err) -} - -func TestHasApplicationVersion(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - found, err := client.HasApplicationVersion(FAKE_APP_NAME, "2014-04-04T06:25:31.399Z") - assert.Nil(t, err) - assert.True(t, found) - found, err = client.HasApplicationVersion(FAKE_APP_NAME, "###2015-04-04T06:25:31.399Z") - assert.Nil(t, err) - assert.False(t, found) -} - -func TestApplicationOK(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - ok, err := client.ApplicationOK(FAKE_APP_NAME) - assert.Nil(t, err) - assert.True(t, ok) - ok, err = client.ApplicationOK(FAKE_APP_NAME_BROKEN) - assert.Nil(t, err) - assert.False(t, ok) -} - -func TestListApplication(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - application, err := client.Application(FAKE_APP_NAME) - assert.Nil(t, err) - assert.NotNil(t, application) - assert.Equal(t, application.ID, FAKE_APP_NAME) - assert.NotNil(t, application.HealthChecks) - assert.NotNil(t, application.Tasks) - assert.Equal(t, len(application.HealthChecks), 1) - assert.Equal(t, len(application.Tasks), 2) -} - -func TestHasApplication(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - found, err := client.HasApplication(FAKE_APP_NAME) - assert.Nil(t, err) - assert.True(t, found) -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/client.go 
b/Godeps/_workspace/src/github.com/gambol99/go-marathon/client.go deleted file mode 100644 index 1aa99c653..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/client.go +++ /dev/null @@ -1,371 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - "net/url" - "strings" - "sync" - "time" -) - -const ( - HTTP_GET = "GET" - HTTP_PUT = "PUT" - HTTP_DELETE = "DELETE" - HTTP_POST = "POST" -) - -type Marathon interface { - /* -- APPLICATIONS --- */ - - /* check it see if a application exists */ - HasApplication(name string) (bool, error) - /* get a listing of the application ids */ - ListApplications(url.Values) ([]string, error) - /* a list of application versions */ - ApplicationVersions(name string) (*ApplicationVersions, error) - /* check a application version exists */ - HasApplicationVersion(name, version string) (bool, error) - /* change an application to a different version */ - SetApplicationVersion(name string, version *ApplicationVersion) (*DeploymentID, error) - /* check if an application is ok */ - ApplicationOK(name string) (bool, error) - /* create an application in marathon */ - CreateApplication(application *Application, wait_on_running bool) (*Application, error) - /* delete an application */ - DeleteApplication(name string) (*DeploymentID, error) - /* update an application in marathon */ - UpdateApplication(application *Application, wait_on_running bool) (*Application, error) - /* a list of deployments on a application */ - ApplicationDeployments(name string) ([]*DeploymentID, error) - /* scale a application */ - ScaleApplicationInstances(name string, instances int) (*DeploymentID, error) - /* restart an application */ - RestartApplication(name string, force bool) (*DeploymentID, error) - /* get a list of applications from marathon */ - Applications(url.Values) (*Applications, error) - /* get a specific application */ - Application(name string) (*Application, error) - /* wait of application */ - WaitOnApplication(name string, timeout time.Duration) error - - /* -- TASKS --- */ - - /* get a list of tasks for a specific application */ - Tasks(application string) (*Tasks, error) - /* get a list of all tasks */ - AllTasks() (*Tasks, error) - /* get a listing of the task ids */ - ListTasks() ([]string, error) - /* get the endpoints for a service on a application */ - TaskEndpoints(name string, port int, health_check bool) ([]string, error) - /* kill all the tasks for any application */ - KillApplicationTasks(application_id, hostname string, scale bool) (*Tasks, error) - /* kill a single task */ - KillTask(task_id string, scale bool) (*Task, error) - /* kill the given array of tasks */ - KillTasks(task_ids []string, scale bool) error - - /* --- GROUPS --- */ - - /* list all the groups in the system */ - Groups() (*Groups, error) - /* retrieve a specific group from marathon */ - Group(name string) (*Group, error) - /* create a 
group deployment */ - CreateGroup(group *Group, wait_on_running bool) error - /* delete a group */ - DeleteGroup(name string) (*DeploymentID, error) - /* update a groups */ - UpdateGroup(id string, group *Group) (*DeploymentID, error) - /* check if a group exists */ - HasGroup(name string) (bool, error) - /* wait for an group to be deployed */ - WaitOnGroup(name string, timeout time.Duration) error - - /* --- DEPLOYMENTS --- */ - - /* get a list of the deployments */ - Deployments() ([]*Deployment, error) - /* delete a deployment */ - DeleteDeployment(id string, force bool) (*DeploymentID, error) - /* check to see if a deployment exists */ - HasDeployment(id string) (bool, error) - /* wait of a deployment to finish */ - WaitOnDeployment(version string, timeout time.Duration) error - - /* --- SUBSCRIPTIONS --- */ - - /* a list of current subscriptions */ - Subscriptions() (*Subscriptions, error) - /* add a events listener */ - AddEventsListener(channel EventsChannel, filter int) error - /* remove a events listener */ - RemoveEventsListener(channel EventsChannel) - /* remove our self from subscriptions */ - UnSubscribe() error - - /* --- MISC --- */ - - /* get the marathon url */ - GetMarathonURL() string - /* ping the marathon */ - Ping() (bool, error) - /* grab the marathon server info */ - Info() (*Info, error) - /* retrieve the leader info */ - Leader() (string, error) - /* cause the current leader to abdicate */ - AbdicateLeader() (string, error) -} - -var ( - /* the url specified was invalid */ - ErrInvalidEndpoint = errors.New("Invalid Marathon endpoint specified") - /* invalid or error response from marathon */ - ErrInvalidResponse = errors.New("Invalid response from Marathon") - /* some resource does not exists */ - ErrDoesNotExist = errors.New("The resource does not exist") - /* all the marathon endpoints are down */ - ErrMarathonDown = errors.New("All the Marathon hosts are presently down") - /* unable to decode the response */ - ErrInvalidResult = errors.New("Unable to decode the response from Marathon") - /* invalid argument */ - ErrInvalidArgument = errors.New("The argument passed is invalid") - /* error return by marathon */ - ErrMarathonError = errors.New("Marathon error") - /* the operation has timed out */ - ErrTimeoutError = errors.New("The operation has timed out") -) - -type Client struct { - sync.RWMutex - /* the configuration for the client */ - config Config - /* the ip addess of the client */ - ipaddress string - /* the http server */ - events_http *http.Server - /* the http client */ - http *http.Client - /* the output for the logger */ - logger *log.Logger - /* the marathon cluster */ - cluster Cluster - /* a map of service you wish to listen to */ - listeners map[EventsChannel]int -} - -type Message struct { - Message string `json:"message"` -} - -func NewClient(config Config) (Marathon, error) { - /* step: we parse the url and build a cluster */ - if cluster, err := NewMarathonCluster(config.URL); err != nil { - return nil, err - } else { - // step: create the service marathon client - service := new(Client) - service.config = config - // step: create a logger from the output - if config.LogOutput == nil { - config.LogOutput = ioutil.Discard - } - service.logger = log.New(config.LogOutput, "[debug]: ", 0) - service.listeners = make(map[EventsChannel]int, 0) - service.cluster = cluster - service.http = &http.Client{ - Timeout: (time.Duration(config.RequestTimeout) * time.Second), - } - return service, nil - } -} - -func (client *Client) GetMarathonURL() string { - 
return client.cluster.Url() -} - -// Pings the current marathon endpoint (note, this is not a ICMP ping, but a rest api call) -func (client *Client) Ping() (bool, error) { - if err := client.apiGet(MARATHON_API_PING, nil, nil); err != nil { - return false, err - } - return true, nil -} - -func (client *Client) marshallJSON(data interface{}) (string, error) { - if response, err := json.Marshal(data); err != nil { - return "", err - } else { - return string(response), err - } -} - -func (client *Client) unMarshallDataToJson(stream io.Reader, result interface{}) error { - decoder := json.NewDecoder(stream) - if err := decoder.Decode(result); err != nil { - return err - } - return nil -} - -func (client *Client) unmarshallJsonArray(stream io.Reader, results []interface{}) error { - decoder := json.NewDecoder(stream) - if err := decoder.Decode(results); err != nil { - return err - } - return nil -} - -func (client *Client) apiPostData(data interface{}) (string, error) { - if data == nil { - return "", nil - } - content, err := client.marshallJSON(data) - if err != nil { - return "", err - } - return content, nil -} - -func (client *Client) apiGet(uri string, post, result interface{}) error { - if content, err := client.apiPostData(post); err != nil { - return err - } else { - _, _, error := client.apiCall(HTTP_GET, uri, content, result) - return error - } -} - -func (client *Client) apiPut(uri string, post, result interface{}) error { - if content, err := client.apiPostData(post); err != nil { - return err - } else { - _, _, error := client.apiCall(HTTP_PUT, uri, content, result) - return error - } -} - -func (client *Client) apiPost(uri string, post, result interface{}) error { - if content, err := client.apiPostData(post); err != nil { - return err - } else { - _, _, error := client.apiCall(HTTP_POST, uri, content, result) - return error - } -} - -func (client *Client) apiDelete(uri string, post, result interface{}) error { - if content, err := client.apiPostData(post); err != nil { - return err - } else { - _, _, error := client.apiCall(HTTP_DELETE, uri, content, result) - return error - } -} - -func (client *Client) apiCall(method, uri, body string, result interface{}) (int, string, error) { - client.log("apiCall() method: %s, uri: %s, body: %s", method, uri, body) - if status, content, _, err := client.httpCall(method, uri, body); err != nil { - return 0, "", err - } else { - client.log("apiCall() status: %d, content: %s\n", status, content) - if status >= 200 && status <= 299 { - if result != nil { - if err := client.unMarshallDataToJson(strings.NewReader(content), result); err != nil { - client.log("apiCall(): failed to unmarshall the response from marathon, error: %s", err) - return status, content, ErrInvalidResponse - } - } - client.log("apiCall() result: %V", result) - return status, content, nil - } - switch status { - case 500: - return status, "", ErrInvalidResponse - case 404: - return status, "", ErrDoesNotExist - } - - /* step: lets decode into a error message */ - var message Message - if err := client.unMarshallDataToJson(strings.NewReader(content), &message); err != nil { - return status, content, ErrInvalidResponse - } else { - errorMessage := "unknown error" - if message.Message != "" { - errorMessage = message.Message - } - return status, message.Message, errors.New(errorMessage) - } - } -} - -func (client *Client) httpCall(method, uri, body string) (int, string, *http.Response, error) { - /* step: get a member from the cluster */ - if marathon, err := 
client.cluster.GetMember(); err != nil { - return 0, "", nil, err - } else { - url := fmt.Sprintf("%s/%s", marathon, uri) - client.log("httpCall(): %s, uri: %s, url: %s", method, uri, url) - - if request, err := http.NewRequest(method, url, strings.NewReader(body)); err != nil { - return 0, "", nil, err - } else { - if client.config.HttpBasicAuthUser != "" { - request.SetBasicAuth(client.config.HttpBasicAuthUser, client.config.HttpBasicPassword) - } - request.Header.Add("Content-Type", "application/json") - request.Header.Add("Accept", "application/json") - var content string - /* step: perform the request */ - if response, err := client.http.Do(request); err != nil { - /* step: mark the endpoint as down */ - client.cluster.MarkDown() - /* step: retry the request with another endpoint */ - return client.httpCall(method, uri, body) - } else { - /* step: lets read in any content */ - client.log("httpCall: %s, uri: %s, url: %s\n", method, uri, url) - if response.ContentLength != 0 { - /* step: read in the content from the request */ - response_content, err := ioutil.ReadAll(response.Body) - if err != nil { - return response.StatusCode, "", response, err - } - content = string(response_content) - } - /* step: return the request */ - return response.StatusCode, content, response, nil - } - } - } - return 0, "", nil, errors.New("Unable to make call to marathon") -} - -func (client *Client) log(message string, args ...interface{}) { - client.logger.Printf(message+"\n", args...) -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/client_test.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/client_test.go deleted file mode 100644 index 49deeb392..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/client_test.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPing(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - found, err := client.Ping() - assert.Nil(t, err) - assert.True(t, found) -} - -func TestGetMarathonURL(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - assert.Equal(t, client.GetMarathonURL(), FAKE_MARATHON_URL) -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/cluster.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/cluster.go deleted file mode 100644 index 554780d63..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/cluster.go +++ /dev/null @@ -1,219 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "sync" - "time" -) - -const ( - MEMBER_AVAILABLE = 0 - MEMBER_UNAVAILABLE = 1 -) - -type Cluster interface { - Url() string - /* retrieve a member from the cluster */ - GetMember() (string, error) - /* make the last member as down */ - MarkDown() - /* the size of the cluster */ - Size() int - /* the members which are available */ - Active() []string - /* the members which are NOT available */ - NonActive() []string -} - -type MarathonCluster struct { - sync.RWMutex - /* the cluster url */ - url string - /* a link list of members */ - members *Member - /* the number of members */ - size int - /* the protocol */ - protocol string - /* the current host */ - active *Member -} - -func (cluster MarathonCluster) String() string { - return fmt.Sprintf("url: %s|%s, members: %s, size: %d, active: %s", - cluster.protocol, cluster.url, cluster.members, cluster.size, cluster.active) -} - -func (cluster *MarathonCluster) ClusterState() []string { - list := make([]string, 0) - member := cluster.members - for i := 0; i < cluster.size; i++ { - list = append(list, fmt.Sprintf("%s", member)) - member = member.next - } - return list -} - -type Member struct { - /* the name / ip address of the host */ - hostname string - /* the status of the host */ - status int - /* the next member in the list */ - next *Member -} - -func (member Member) String() string { - status := "UP" - if member.status == MEMBER_UNAVAILABLE { - status = "DOWN" - } - return fmt.Sprintf("member: %s:%s", member.hostname, status) -} - -func NewMarathonCluster(marathon_url string) (Cluster, error) { - cluster := new(MarathonCluster) - /* step: parse the marathon url */ - if marathon, err := url.Parse(marathon_url); err != nil { - return nil, ErrInvalidEndpoint - } else { - /* step: check the protocol */ - if marathon.Scheme != "http" && marathon.Scheme != "https" { - return nil, ErrInvalidEndpoint - } - cluster.protocol = marathon.Scheme - cluster.url = marathon_url - - /* step: create a link list of the hosts */ - var previous *Member = nil - for index, host := range strings.SplitN(marathon.Host, ",", -1) { - /* step: create a new cluster member */ - member := new(Member) - member.hostname = host - cluster.size += 1 - /* step: if the first member */ - if index == 0 { - cluster.members = member - cluster.active = member - previous = member - } else { - previous.next = member - previous = member - } - } - /* step: close the link list */ - previous.next = cluster.active - } - return cluster, nil -} - -func (cluster *MarathonCluster) Url() string { - return cluster.url -} - -// Retrieve a list of active members -func (cluster *MarathonCluster) Active() []string { - cluster.RLock() - defer cluster.RUnlock() - member := cluster.active - list := make([]string, 0) - for i := 0; i < cluster.size; i++ { - if member.status == MEMBER_AVAILABLE { - list = append(list, member.hostname) - } - } - return list -} - -// Retrieve a list of endpoints which are non-active -func (cluster *MarathonCluster) NonActive() []string { - cluster.RLock() - defer cluster.RUnlock() - 
member := cluster.active - list := make([]string, 0) - for i := 0; i < cluster.size; i++ { - if member.status == MEMBER_UNAVAILABLE { - list = append(list, member.hostname) - } - } - return list -} - -// Retrieve the current member, i.e. the current endpoint in use -func (cluster *MarathonCluster) GetMember() (string, error) { - cluster.Lock() - defer cluster.Unlock() - for i := 0; i < cluster.size; i++ { - if cluster.active.status == MEMBER_AVAILABLE { - return cluster.GetMarathonURL(cluster.active), nil - } - /* move to the next member */ - if cluster.active.next != nil { - cluster.active = cluster.active.next - } else { - return "", errors.New("No cluster memebers available at the moment") - } - } - /* we reached the end and there were no members available */ - return "", errors.New("No cluster memebers available at the moment") -} - -// Retrieves the current marathon url -func (cluster *MarathonCluster) GetMarathonURL(member *Member) string { - return fmt.Sprintf("%s://%s", cluster.protocol, member.hostname) -} - -// Marks the current endpoint as down and waits for it to come back only -func (cluster *MarathonCluster) MarkDown() { - cluster.Lock() - defer cluster.Unlock() - - /* step: mark the current host as down */ - member := cluster.active - member.status = MEMBER_UNAVAILABLE - - /* step: create a go-routine to place the member back in */ - go func() { - http_client := &http.Client{} - - /* step: we wait a ping from the host to work */ - for { - if response, err := http_client.Get(cluster.GetMarathonURL(member) + "/ping"); err == nil && response.StatusCode == 200 { - member.status = MEMBER_AVAILABLE - return - } else { - time.Sleep(10 * time.Second) - } - } - }() - - /* step: move to the next member */ - if cluster.active.next != nil { - cluster.active = cluster.active.next - } -} - -// Retrieve the size of the cluster -func (cluster *MarathonCluster) Size() int { - return cluster.size -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/cluster_test.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/cluster_test.go deleted file mode 100644 index e623d19ec..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/cluster_test.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package marathon - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var cluster Cluster - -func GetFakeCluster() { - if cluster == nil { - cluster, _ = NewMarathonCluster(FAKE_MARATHON_URL) - } -} - -func TestUrl(t *testing.T) { - GetFakeCluster() - assert.Equal(t, cluster.Url(), FAKE_MARATHON_URL) -} - -func TestSize(t *testing.T) { - GetFakeCluster() - assert.Equal(t, cluster.Size(), 2) -} - -func TestActive(t *testing.T) { - GetFakeCluster() - assert.Equal(t, len(cluster.Active()), 2) -} - -func TestNonActive(t *testing.T) { - GetFakeCluster() - assert.Equal(t, len(cluster.NonActive()), 0) -} - -func TestGetMember(t *testing.T) { - GetFakeCluster() - member, err := cluster.GetMember() - assert.Nil(t, err) - assert.Equal(t, member, "http://127.0.0.1:3000") -} - -func TestMarkdown(t *testing.T) { - GetFakeCluster() - assert.Equal(t, len(cluster.Active()), 2) - cluster.MarkDown() - time.Sleep(10 * time.Millisecond) - assert.Equal(t, len(cluster.Active()), 2 ) -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/config.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/config.go deleted file mode 100644 index 84a8f0573..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/config.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import ( - "io" - "io/ioutil" - "time" -) - -type Config struct { - /* the url for marathon */ - URL string - /* event handler port */ - EventsPort int - /* the interface we should be listening on for events */ - EventsInterface string - /* the output for logging */ - LogOutput io.Writer - /* the timeout for requests */ - RequestTimeout int - /* the default timeout for deployments */ - DefaultDeploymentTimeout time.Duration - /* http basic auth */ - HttpBasicAuthUser string - /* http basic password */ - HttpBasicPassword string -} - -func NewDefaultConfig() Config { - return Config{ - URL: "http://127.0.0.1:8080", - EventsPort: 10001, - EventsInterface: "eth0", - LogOutput: ioutil.Discard, - HttpBasicAuthUser: "", - HttpBasicPassword: "", - DefaultDeploymentTimeout: time.Duration(300) * time.Second, - RequestTimeout: 5} -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/const.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/const.go deleted file mode 100644 index bce5c6f6d..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/const.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -const ( - DEFAULT_EVENTS_URL = "/event" - - /* --- api related constants --- */ - MARATHON_API_VERSION = "v2" - MARATHON_API_SUBSCRIPTION = MARATHON_API_VERSION + "/eventSubscriptions" - MARATHON_API_APPS = MARATHON_API_VERSION + "/apps" - MARATHON_API_TASKS = MARATHON_API_VERSION + "/tasks" - MARATHON_API_DEPLOYMENTS = MARATHON_API_VERSION + "/deployments" - MARATHON_API_GROUPS = MARATHON_API_VERSION + "/groups" - MARATHON_API_QUEUE = MARATHON_API_VERSION + "/queue" - MARATHON_API_INFO = MARATHON_API_VERSION + "/info" - MARATHON_API_LEADER = MARATHON_API_VERSION + "/leader" - MARATHON_API_PING = "/ping" - MARATHON_API_LOGGING = "/logging" - MARATHON_API_HELP = "/help" - MARATHON_API_METRICS = "/metrics" -) diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/deployment.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/deployment.go deleted file mode 100644 index 0b5468b0a..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/deployment.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import ( - "fmt" - "time" -) - -type Deployment struct { - ID string `json:"id"` - Version string `json:"version"` - CurrentStep int `json:"currentStep"` - TotalSteps int `json:"totalSteps"` - AffectedApps []string `json:"affectedApps"` - Steps [][]*DeploymentStep `json:"steps"` - CurrentActions []*DeploymentStep `json:"currentActions"` -} - -type DeploymentID struct { - DeploymentID string `json:"deploymentId"` - Version string `json:"version"` -} - -type DeploymentStep struct { - Action string `json:"action"` - App string `json:"app"` -} - -type DeploymentPlan struct { - ID string `json:"id"` - Version string `json:"version"` - Original struct { - Apps []*Application `json:"apps"` - Dependencies []string `json:"dependencies"` - Groups []*Group `json:"groups"` - ID string `json:"id"` - Version string `json:"version"` - } `json:"original"` - Steps []*DeploymentStep `json:"steps"` - Target struct { - Apps []*Application `json:"apps"` - Dependencies []string `json:"dependencies"` - Groups []*Group `json:"groups"` - ID string `json:"id"` - Version string `json:"version"` - } `json:"target"` -} - -// Retrieve a list of current deployments -func (client *Client) Deployments() ([]*Deployment, error) { - var deployments []*Deployment - if err := client.apiGet(MARATHON_API_DEPLOYMENTS, nil, &deployments); err != nil { - return nil, err - } else { - return deployments, nil - } -} - -// Delete a current deployment from marathon -// id: the deployment id you wish to delete -// force: whether or not to force the deletion -func (client *Client) DeleteDeployment(id string, force bool) (*DeploymentID, error) { - deployment := new(DeploymentID) - if err := client.apiDelete(fmt.Sprintf("%s/%s", MARATHON_API_DEPLOYMENTS, id), nil, deployment); err != nil { - return nil, err - } 
else { - return deployment, nil - } -} - -// Check to see if a deployment exists -// id: the deployment id you are looking for -func (client *Client) HasDeployment(id string) (bool, error) { - deployments, err := client.Deployments() - if err != nil { - return false, err - } - for _, deployment := range deployments { - if deployment.ID == id { - return true, nil - } - } - return false, nil -} - -// Wait of a deployment to finish -// version: the version of the application -// timeout: the timeout to wait for the deployment to take, otherwise return an error -func (client *Client) WaitOnDeployment(id string, timeout time.Duration) error { - if found, err := client.HasDeployment(id); err != nil { - return err - } else if !found { - return nil - } - - client.log("WaitOnDeployment() Waiting for deployment: %s to finish", id) - now_time := time.Now() - stop_time := now_time.Add(timeout) - if timeout <= 0 { - stop_time = now_time.Add(time.Duration(900) * time.Second) - } - - // step: a somewhat naive implementation, but it will work - for { - if time.Now().After(stop_time) { - return ErrTimeoutError - } - found, err := client.HasDeployment(id) - if err != nil { - client.log("WaitOnDeployment() Failed to get the deployments list, error: %s", err) - } - if !found { - return nil - } - time.Sleep(time.Duration(2) * time.Second) - } -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/deployment_test.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/deployment_test.go deleted file mode 100644 index 26b8933ff..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/deployment_test.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDeployments(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - deployments, err := client.Deployments() - assert.Nil(t, err) - assert.NotNil(t, deployments) - assert.Equal(t, len(deployments), 1) - deployment := deployments[0] - assert.NotNil(t, deployment) - assert.Equal(t, deployment.ID, "867ed450-f6a8-4d33-9b0e-e11c5513990b") - assert.NotNil(t, deployment.Steps) - assert.Equal(t, len(deployment.Steps), 1) -} - -func TestDeleteDeployment(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - id, err := client.DeleteDeployment(FAKE_DEPLOYMENT_ID, false) - assert.Nil(t, err) - assert.NotNil(t, t) - assert.Equal(t, id.DeploymentID, "0b1467fc-d5cd-4bbc-bac2-2805351cee1e") - assert.Equal(t, id.Version, "2014-08-26T08:20:26.171Z") -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/docker.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/docker.go deleted file mode 100644 index 188f1c6ac..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/docker.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import "errors" - -type Container struct { - Type string `json:"type,omitempty"` - Docker *Docker `json:"docker,omitempty"` - Volumes []*Volume `json:"volumes,omitempty"` -} -type PortMapping struct { - ContainerPort int `json:"containerPort,omitempty"` - HostPort int `json:"hostPort"` - ServicePort int `json:"servicePort,omitempty"` - Protocol string `json:"protocol"` -} - -type Parameters struct { - Key string `json:"key,omitempty"` - Value string `json:"value,omitempty"` -} - -type Volume struct { - ContainerPath string `json:"containerPath,omitempty"` - HostPath string `json:"hostPath,omitempty"` - Mode string `json:"mode,omitempty"` -} - -type Docker struct { - ForcePullImage bool `json:"forcePullImage,omitempty"` - Image string `json:"image,omitempty"` - Network string `json:"network,omitempty"` - Parameters []*Parameters `json:"parameters,omitempty"` - PortMappings []*PortMapping `json:"portMappings,omitempty"` - Privileged bool `json:"privileged,omitempty"` -} - -func (container *Container) Volume(host_path, container_path, mode string) *Container { - if container.Volumes == nil { - container.Volumes = make([]*Volume, 0) - } - container.Volumes = append(container.Volumes, &Volume{ - ContainerPath: container_path, - HostPath: host_path, - Mode: mode, - }) - return container -} - -func NewDockerContainer() *Container { - container := new(Container) - container.Type = "DOCKER" - container.Docker = &Docker{ - Image: "", - Network: "BRIDGE", - PortMappings: make([]*PortMapping, 0), - Parameters: make([]*Parameters, 0), - } - container.Volumes = make([]*Volume, 0) - return container -} - -func (docker *Docker) Container(image string) *Docker { - docker.Image = image - return docker -} - -func (docker *Docker) Bridged() *Docker { - docker.Network = "BRIDGE" - return docker -} - -func (docker *Docker) Expose(port int) *Docker { - docker.ExposePort(port, 0, 0, "tcp") - return docker -} - -func (docker *Docker) ExposeUDP(port int) *Docker { - docker.ExposePort(port, 0, 0, "udp") - return docker -} - -func (docker *Docker) ExposePort(container_port, host_port, service_port int, protocol string) *Docker { - if docker.PortMappings == nil { - docker.PortMappings = make([]*PortMapping, 0) - } - docker.PortMappings = append(docker.PortMappings, &PortMapping{ - ContainerPort: container_port, - HostPort: host_port, - ServicePort: service_port, - Protocol: protocol}) - return docker -} - -func (docker *Docker) Parameter(key string, value string) *Docker { - if docker.Parameters == nil { - docker.Parameters = make([]*Parameters, 0) - } - docker.Parameters = append(docker.Parameters, &Parameters{ - Key: key, - Value: value}) - - return docker -} - -func (docker *Docker) ServicePortIndex(port int) (int, error) { - if docker.PortMappings == nil || len(docker.PortMappings) <= 0 { - return 0, errors.New("The docker does not contain any port mappings to search") - } - /* step: iterate and find the port */ - for index, container_port := range 
docker.PortMappings { - if container_port.ContainerPort == port { - return index, nil - } - } - /* step: we didn't find the port in the mappings */ - return 0, errors.New("The container port required was not found in the container port mappings") -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/events.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/events.go deleted file mode 100644 index a58dab511..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/events.go +++ /dev/null @@ -1,252 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import "fmt" - -type EventType struct { - EventType string `json:"eventType"` -} - -const ( - EVENT_API_REQUEST = 1 << iota - EVENT_STATUS_UPDATE - EVENT_FRAMEWORK_MESSAGE - EVENT_SUBSCRIPTION - EVENT_UNSUBSCRIBED - EVENT_ADD_HEALTH_CHECK - EVENT_REMOVE_HEALTH_CHECK - EVENT_FAILED_HEALTH_CHECK - EVENT_CHANGED_HEALTH_CHECK - EVENT_GROUP_CHANGE_SUCCESS - EVENT_GROUP_CHANGE_FAILED - EVENT_DEPLOYMENT_SUCCESS - EVENT_DEPLOYMENT_FAILED - EVENT_DEPLOYMENT_INFO - EVENT_DEPLOYMENT_STEP_SUCCESS - EVENT_DEPLOYMENT_STEP_FAILED - EVENT_APP_TERMINATED -) - -const ( - EVENTS_APPLICATIONS = EVENT_STATUS_UPDATE | EVENT_CHANGED_HEALTH_CHECK | EVENT_FAILED_HEALTH_CHECK | EVENT_APP_TERMINATED - EVENTS_SUBSCRIPTIONS = EVENT_SUBSCRIPTION | EVENT_UNSUBSCRIBED -) - -var ( - Events map[string]int -) - -func init() { - Events = map[string]int{ - "api_post_event": EVENT_API_REQUEST, - "status_update_event": EVENT_STATUS_UPDATE, - "framework_message_event": EVENT_FRAMEWORK_MESSAGE, - "subscribe_event": EVENT_SUBSCRIPTION, - "unsubscribe_event": EVENT_UNSUBSCRIBED, - "add_health_check_event": EVENT_ADD_HEALTH_CHECK, - "remove_health_check_event": EVENT_REMOVE_HEALTH_CHECK, - "failed_health_check_event": EVENT_FAILED_HEALTH_CHECK, - "health_status_changed_event": EVENT_CHANGED_HEALTH_CHECK, - "group_change_success": EVENT_GROUP_CHANGE_SUCCESS, - "group_change_failed": EVENT_GROUP_CHANGE_FAILED, - "deployment_success": EVENT_DEPLOYMENT_SUCCESS, - "deployment_failed": EVENT_DEPLOYMENT_FAILED, - "deployment_info": EVENT_DEPLOYMENT_INFO, - "deployment_step_success": EVENT_DEPLOYMENT_STEP_SUCCESS, - "deployment_step_failure": EVENT_DEPLOYMENT_STEP_FAILED, - "app_terminated_event": EVENT_APP_TERMINATED, - } -} - -// -// Events taken from: https://mesosphere.github.io/marathon/docs/event-bus.html -// - -type Event struct { - ID int - Name string - Event interface{} -} - -func (event *Event) String() string { - return fmt.Sprintf("type: %s, event: %s", event.Name, event.Event) -} - -type EventsChannel chan *Event - -/* --- API Request --- */ - -type EventAPIRequest struct { - EventType string `json:"eventType"` - ClientIp string `json:"clientIp"` - Timestamp string `json:"timestamp"` - Uri string `json:"uri"` - AppDefinition *Application `json:"appDefinition"` -} - -/* --- Status Update --- */ - -type EventStatusUpdate struct { - EventType string `json:"eventType"` - Timestamp string 
`json:"timestamp,omitempty"` - SlaveID string `json:"slaveId,omitempty"` - TaskID string `json:"taskId"` - TaskStatus string `json:"taskStatus"` - AppID string `json:"appId"` - Host string `json:"host"` - Ports []int `json:"ports,omitempty"` - Version string `json:"version,omitempty"` -} - -type EventAppTerminated struct { - EventType string `json:"eventType"` - Timestamp string `json:"timestamp,omitempty"` - AppID string `json:"appId"` -} - -/* --- Framework Message --- */ - -type EventFrameworkMessage struct { - EventType string `json:"eventType"` - ExecutorId string `json:"executorId"` - Message string `json:"message"` - SlaveId string `json:"slaveId"` - Timestamp string `json:"timestamp"` -} - -/* --- Event Subscription --- */ - -type EventSubscription struct { - CallbackUrl string `json:"callbackUrl"` - ClientIp string `json:"clientIp"` - EventType string `json:"eventType"` - Timestamp string `json:"timestamp"` -} - -type EventUnsubscription struct { - CallbackUrl string `json:"callbackUrl"` - ClientIp string `json:"clientIp"` - EventType string `json:"eventType"` - Timestamp string `json:"timestamp"` -} - -/* --- Health Checks --- */ - -type EventAddHealthCheck struct { - AppId string `json:"appId"` - EventType string `json:"eventType"` - HealthCheck struct { - GracePeriodSeconds float64 `json:"gracePeriodSeconds"` - IntervalSeconds float64 `json:"intervalSeconds"` - MaxConsecutiveFailures float64 `json:"maxConsecutiveFailures"` - Path string `json:"path"` - PortIndex float64 `json:"portIndex"` - Protocol string `json:"protocol"` - TimeoutSeconds float64 `json:"timeoutSeconds"` - } `json:"healthCheck"` - Timestamp string `json:"timestamp"` -} - -type EventRemoveHealthCheck struct { - AppId string `json:"appId"` - EventType string `json:"eventType"` - HealthCheck struct { - GracePeriodSeconds float64 `json:"gracePeriodSeconds"` - IntervalSeconds float64 `json:"intervalSeconds"` - MaxConsecutiveFailures float64 `json:"maxConsecutiveFailures"` - Path string `json:"path"` - PortIndex float64 `json:"portIndex"` - Protocol string `json:"protocol"` - TimeoutSeconds float64 `json:"timeoutSeconds"` - } `json:"healthCheck"` - Timestamp string `json:"timestamp"` -} - -type EventFailedHealthCheck struct { - AppId string `json:"appId"` - EventType string `json:"eventType"` - HealthCheck struct { - GracePeriodSeconds float64 `json:"gracePeriodSeconds"` - IntervalSeconds float64 `json:"intervalSeconds"` - MaxConsecutiveFailures float64 `json:"maxConsecutiveFailures"` - Path string `json:"path"` - PortIndex float64 `json:"portIndex"` - Protocol string `json:"protocol"` - TimeoutSeconds float64 `json:"timeoutSeconds"` - } `json:"healthCheck"` - Timestamp string `json:"timestamp"` -} - -type EventHealthCheckChanged struct { - EventType string `json:"eventType"` - Timestamp string `json:"timestamp,omitempty"` - AppID string `json:"appId"` - TaskID string `json:"taskId"` - Version string `json:"version,omitempty"` - Alive bool `json:"alive"` -} - -/* --- Deployments --- */ - -type EventGroupChangeSuccess struct { - EventType string `json:"eventType"` - GroupId string `json:"groupId"` - Timestamp string `json:"timestamp"` - Version string `json:"version"` -} - -type EventGroupChangeFailed struct { - EventType string `json:"eventType"` - GroupId string `json:"groupId"` - Timestamp string `json:"timestamp"` - Version string `json:"version"` - Reason string `json:"reason"` -} - -type EventDeploymentSuccess struct { - ID string `json:"id"` - EventType string `json:"eventType"` - Timestamp string 
`json:"timestamp"` -} - -type EventDeploymentFailed struct { - ID string `json:"id"` - EventType string `json:"eventType"` - Timestamp string `json:"timestamp"` -} - -type EventDeploymentInfo struct { - EventType string `json:"eventType"` - CurrentStep *DeploymentStep `json:"currentStep"` - Timestamp string `json:"timestamp"` - Plan *DeploymentPlan `json:"plan"` -} - -type EventDeploymentStepSuccess struct { - EventType string `json:"eventType"` - CurrentStep *DeploymentStep `json:"currentStep"` - Timestamp string `json:"timestamp"` - Plan *DeploymentPlan `json:"plan"` -} - -type EventDeploymentStepFailure struct { - EventType string `json:"eventType"` - CurrentStep *DeploymentStep `json:"currentStep"` - Timestamp string `json:"timestamp"` - Plan *DeploymentPlan `json:"plan"` -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/applications/main.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/applications/main.go deleted file mode 100644 index 5e5c330de..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/applications/main.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "flag" - "os" - "time" - - marathon "github.com/gambol99/go-marathon" - - "github.com/golang/glog" -) - -var marathon_url string - -func init() { - flag.StringVar(&marathon_url, "url", "http://127.0.0.1:8080", "the url for the marathon endpoint") -} - -func Assert(err error) { - if err != nil { - glog.Fatalf("Failed, error: %s", err) - } -} - -func waitOnDeployment(client marathon.Marathon, id *marathon.DeploymentID) { - Assert(client.WaitOnDeployment(id.DeploymentID, 0)) -} - -func main() { - flag.Parse() - config := marathon.NewDefaultConfig() - config.URL = marathon_url - config.LogOutput = os.Stdout - client, err := marathon.NewClient(config) - Assert(err) - applications, err := client.Applications(nil) - Assert(err) - - glog.Infof("Found %d application running", len(applications.Apps)) - for _, application := range applications.Apps { - glog.Infof("Application: %s", application) - details, err := client.Application(application.ID) - Assert(err) - if details.Tasks != nil && len(details.Tasks) > 0 { - for _, task := range details.Tasks { - glog.Infof("task: %s", task) - } - health, err := client.ApplicationOK(details.ID) - Assert(err) - glog.Infof("Application: %s, healthy: %t", details.ID, health) - } - } - - APPLICATION_NAME := "/my/product" - - if found, _ := client.HasApplication(APPLICATION_NAME); found { - deployId, err := client.DeleteApplication(APPLICATION_NAME) - Assert(err) - waitOnDeployment(client, deployId) - } - - glog.Infof("Deploying a new application") - application := marathon.NewDockerApplication() - application.Name(APPLICATION_NAME) - application.CPU(0.1).Memory(64).Storage(0.0).Count(2) - application.Arg("/usr/sbin/apache2ctl").Arg("-D").Arg("FOREGROUND") - application.AddEnv("NAME", "frontend_http") - 
application.AddEnv("SERVICE_80_NAME", "test_http") - application.RequirePorts = true - application.Container.Docker.Container("quay.io/gambol99/apache-php:latest").Expose(80).Expose(443) - err, _ = client.CreateApplication(application, true) - Assert(err) - - glog.Infof("Scaling the application to 4 instances") - deployId, err := client.ScaleApplicationInstances(application.ID, 4) - Assert(err) - client.WaitOnApplication(application.ID, 0) - glog.Infof("Successfully scaled the application, deployId: %s", deployId.DeploymentID) - - glog.Infof("Deleting the application: %s", APPLICATION_NAME) - deployId, err = client.DeleteApplication(application.ID) - Assert(err) - time.Sleep(time.Duration(10) * time.Second) - glog.Infof("Successfully deleted the application") - - glog.Infof("Starting the application again") - err, _ = client.CreateApplication(application, true) - Assert(err) - glog.Infof("Created the application: %s", application.ID) - - glog.Infof("Delete all the tasks") - _, err = client.KillApplicationTasks(application.ID, "", false) - Assert(err) -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/events/main.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/events/main.go deleted file mode 100644 index cedf96915..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/events/main.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package main
-
-import (
-	"flag"
-	"os"
-	"time"
-
-	marathon "github.com/gambol99/go-marathon"
-
-	"github.com/golang/glog"
-)
-
-var marathon_url string
-var marathon_interface string
-var marathon_port int
-var timeout int
-
-func init() {
-	flag.StringVar(&marathon_url, "url", "http://127.0.0.1:8080", "the url for the marathon endpoint")
-	flag.StringVar(&marathon_interface, "interface", "eth0", "the interface we should use for events")
-	flag.IntVar(&marathon_port, "port", 19999, "the port the events service should run on")
-	flag.IntVar(&timeout, "timeout", 60, "listen to events for x seconds")
-}
-
-func Assert(err error) {
-	if err != nil {
-		glog.Fatalf("Failed, error: %s", err)
-	}
-}
-
-func main() {
-	flag.Parse()
-	config := marathon.NewDefaultConfig()
-	config.URL = marathon_url
-	config.LogOutput = os.Stdout
-	config.EventsPort = marathon_port
-	config.EventsInterface = marathon_interface
-	glog.Infof("Creating a Marathon client: %s", marathon_url)
-
-	client, err := marathon.NewClient(config)
-	Assert(err)
-
-	/* step: let's register for events */
-	events := make(marathon.EventsChannel, 5)
-	deployments := make(marathon.EventsChannel, 5)
-	Assert(client.AddEventsListener(events, marathon.EVENTS_APPLICATIONS))
-	Assert(client.AddEventsListener(deployments, marathon.EVENT_DEPLOYMENT_STEP_SUCCESS))
-
-	// listen for events until the timeout expires, then exit the loop
-	timer := time.After(time.Duration(timeout) * time.Second)
-	kill_off := false
-	for {
-		if kill_off {
-			break
-		}
-		select {
-		case <-timer:
-			glog.Infof("Exiting the loop")
-			kill_off = true
-		case event := <-events:
-			glog.Infof("Received application event: %s", event)
-		case event := <-deployments:
-			glog.Infof("Received deployment event: %v", event)
-			var deployment *marathon.EventDeploymentStepSuccess
-			deployment = event.Event.(*marathon.EventDeploymentStepSuccess)
-			glog.Infof("deployment step: %v", deployment.CurrentStep)
-		}
-	}
-
-	glog.Infof("Removing our subscription")
-	client.RemoveEventsListener(events)
-	client.RemoveEventsListener(deployments)
-
-	Assert(client.UnSubscribe())
-}
diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/groups/main.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/groups/main.go
deleted file mode 100644
index c095aa83f..000000000
--- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/groups/main.go
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
-Copyright 2014 Rohith All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
-	"flag"
-	"os"
-
-	marathon "github.com/gambol99/go-marathon"
-	"github.com/golang/glog"
-)
-
-var marathon_url string
-
-func init() {
-	flag.StringVar(&marathon_url, "url", "http://127.0.0.1:8080", "the url for the marathon endpoint")
-}
-
-func Assert(err error) {
-	if err != nil {
-		glog.Fatalf("Failed, error: %s", err)
-	}
-}
-
-func main() {
-	flag.Parse()
-	config := marathon.NewDefaultConfig()
-	config.URL = marathon_url
-	config.LogOutput = os.Stdout
-	client, err := marathon.NewClient(config)
-	if err != nil {
-		glog.Fatalf("Failed to create a client for marathon, error: %s", err)
-	}
-
-	glog.Infof("Retrieving a list of groups")
-	if groups, err := client.Groups(); err != nil {
-		glog.Errorf("Failed to retrieve the groups from marathon, error: %s", err)
-	} else {
-		for _, group := range groups.Groups {
-			glog.Infof("Found group: %s", group.ID)
-		}
-	}
-
-	GROUP_NAME := "/product/group"
-
-	found, err := client.HasGroup(GROUP_NAME)
-	Assert(err)
-	if found {
-		glog.Infof("Deleting the group: %s, as it already exists", GROUP_NAME)
-		id, err := client.DeleteGroup(GROUP_NAME)
-		Assert(err)
-		err = client.WaitOnDeployment(id.DeploymentID, 0)
-		Assert(err)
-	}
-
-	/* step: the frontend app */
-	frontend := marathon.NewDockerApplication()
-	frontend.Name("/product/group/frontend")
-	frontend.CPU(0.1).Memory(64).Storage(0.0).Count(2)
-	frontend.Arg("/usr/sbin/apache2ctl").Arg("-D").Arg("FOREGROUND")
-	frontend.AddEnv("NAME", "frontend_http")
-	frontend.AddEnv("SERVICE_80_NAME", "frontend_http")
-	frontend.AddEnv("SERVICE_443_NAME", "frontend_https")
-	frontend.AddEnv("BACKEND_MYSQL", "/product/group/mysql/3306;3306")
-	frontend.AddEnv("BACKEND_CACHE", "/product/group/cache/6379;6379")
-	frontend.DependsOn("/product/group/cache")
-	frontend.DependsOn("/product/group/mysql")
-	frontend.Container.Docker.Container("quay.io/gambol99/apache-php:latest").Expose(80).Expose(443)
-	_, err = frontend.CheckHTTP("/hostname.php", 80, 10)
-	Assert(err)
-
-	mysql := marathon.NewDockerApplication()
-	mysql.Name("/product/group/mysql")
-	mysql.CPU(0.1).Memory(128).Storage(0.0).Count(1)
-	mysql.AddEnv("NAME", "group_cache")
-	mysql.AddEnv("SERVICE_3306_NAME", "mysql")
-	mysql.AddEnv("MYSQL_PASS", "mysql")
-	mysql.Container.Docker.Container("tutum/mysql").Expose(3306)
-	_, err = mysql.CheckTCP(3306, 10)
-	Assert(err)
-
-	redis := marathon.NewDockerApplication()
-	redis.Name("/product/group/cache")
-	redis.CPU(0.1).Memory(64).Storage(0.0).Count(2)
-	redis.AddEnv("NAME", "group_cache")
-	redis.AddEnv("SERVICE_6379_NAME", "redis")
-	redis.Container.Docker.Container("redis:latest").Expose(6379)
-	_, err = redis.CheckTCP(6379, 10)
-	Assert(err)
-
-	group := marathon.NewApplicationGroup(GROUP_NAME)
-	group.App(frontend).App(redis).App(mysql)
-
-	Assert(client.CreateGroup(group, true))
-	glog.Infof("Successfully created the group: %s", group.ID)
-
-	glog.Infof("Updating the group parameters")
-	frontend.Count(4)
-
-	id, err := client.UpdateGroup(GROUP_NAME, group)
-	Assert(err)
-	glog.Infof("Successfully updated the group: %s, version: %s", group.ID, id.DeploymentID)
-	Assert(client.WaitOnGroup(GROUP_NAME, 0))
-}
diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/multiple_endpoints/main.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/multiple_endpoints/main.go
deleted file mode 100644
index 6cf4e2465..000000000
--- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/examples/multiple_endpoints/main.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Copyright 2014 Rohith All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
-	"flag"
-	"os"
-	"time"
-
-	marathon "github.com/gambol99/go-marathon"
-
-	"github.com/golang/glog"
-)
-
-var marathon_url string
-
-func init() {
-	flag.StringVar(&marathon_url, "url", "http://127.0.0.1:8080,127.0.0.1:8080", "the url for the marathon endpoint")
-}
-
-func Assert(err error) {
-	if err != nil {
-		glog.Fatalf("Failed, error: %s", err)
-	}
-}
-
-func main() {
-	flag.Parse()
-	config := marathon.NewDefaultConfig()
-	config.URL = marathon_url
-	config.LogOutput = os.Stdout
-	client, err := marathon.NewClient(config)
-	if err != nil {
-		glog.Fatalf("Failed to create a client for marathon, error: %s", err)
-	}
-	for {
-		if application, err := client.Applications(nil); err != nil {
-			glog.Errorf("Failed to retrieve a list of applications, error: %s", err)
-		} else {
-			glog.Infof("Retrieved a list of applications, %v", application)
-		}
-		glog.Infof("Going to sleep for 5 seconds")
-		time.Sleep(5 * time.Second)
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/group.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/group.go
deleted file mode 100644
index 59079d5f2..000000000
--- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/group.go
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
-Copyright 2014 Rohith All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package marathon - -import ( - "fmt" - "time" -) - -type Group struct { - ID string `json:"id"` - Apps []*Application `json:"apps"` - Dependencies []string `json:"dependencies"` - Groups []*Group `json:"groups"` -} - -type Groups struct { - ID string `json:"id"` - Apps []*Application `json:"apps"` - Dependencies []string `json:"dependencies"` - Groups []*Group `json:"groups"` -} - -// Create a new Application Group -// name: the name of the group -func NewApplicationGroup(name string) *Group { - return &Group{ - ID: name, - Apps: make([]*Application, 0), - Dependencies: make([]string, 0), - Groups: make([]*Group, 0), - } -} - -// Specify the name of the group -// name: the name of the group -func (group *Group) Name(name string) *Group { - group.ID = validateID(name) - return group -} - -// Add a application to the group in question -// application: a pointer to the Application -func (group *Group) App(application *Application) *Group { - if group.Apps == nil { - group.Apps = make([]*Application, 0) - } - group.Apps = append(group.Apps, application) - return group -} - -// Retrieve a list of all the groups from marathon -func (client *Client) Groups() (*Groups, error) { - groups := new(Groups) - if err := client.apiGet(MARATHON_API_GROUPS, "", groups); err != nil { - return nil, err - } - return groups, nil -} - -// Retrieve the configuration of a specific group from marathon -// name: the identifier for the group -func (client *Client) Group(name string) (*Group, error) { - group := new(Group) - if err := client.apiGet(fmt.Sprintf("%s/%s", MARATHON_API_GROUPS, trimRootPath(name)), nil, group); err != nil { - return nil, err - } - return group, nil -} - -// Check if the group exists in marathon -// name: the identifier for the group -func (client *Client) HasGroup(name string) (bool, error) { - uri := fmt.Sprintf("%s/%s", MARATHON_API_GROUPS, trimRootPath(name)) - status, _, err := client.apiCall(HTTP_GET, uri, "", nil) - if err == nil { - return true, nil - } else if status == 404 { - return false, nil - } else { - return false, err - } -} - -// Create a new group in marathon -// group: a pointer the Group structure defining the group -func (client *Client) CreateGroup(group *Group, wait_on_running bool) error { - if err := client.apiPost(MARATHON_API_GROUPS, group, nil); err != nil { - return err - } - if wait_on_running { - return client.WaitOnGroup(group.ID, 0) - } - return nil -} - -// Waits for all the applications in a group to be deployed -// group: the identifier for the group -// timeout: a duration of time to wait before considering it failed (all tasks in all apps running defined as deployed) -func (client *Client) WaitOnGroup(name string, timeout time.Duration) error { - if timeout <= 0 { - timeout = time.Duration(500) * time.Second - } - err := deadline(timeout, func(stop_channel chan bool) error { - var flick AtomicSwitch - go func() { - <-stop_channel - close(stop_channel) - flick.SwitchOn() - }() - for !flick.IsSwitched() { - if group, err := client.Group(name); err != nil { - continue - } else { - all_running := true - // for each of the application, check if the tasks and running - for _, appID := range group.Apps { - // Arrrgghhh!! .. so we can't use application instances from the Application struct like with app wait on as it - // appears the instance count is not set straight away!! .. it defaults to zero and changes probably at the - // dependencies gets deployed. Which is probably how it internally handles dependencies .. 
- // step: grab the application - application, err := client.Application(appID.ID) - if err != nil { - all_running = false - break - } - - if application.Tasks == nil { - all_running = false - } else if len(application.Tasks) != appID.Instances { - all_running = false - } else if application.TasksRunning != appID.Instances { - all_running = false - } else if len(application.Deployments()) > 0 { - all_running = false - } - } - // has anyone toggle the flag? - if all_running { - return nil - } - } - time.Sleep(time.Duration(1) * time.Second) - } - return nil - }) - return err -} - -// Delete a group from marathon -// name: the identifier for the group -func (client *Client) DeleteGroup(name string) (*DeploymentID, error) { - version := new(DeploymentID) - uri := fmt.Sprintf("%s/%s", MARATHON_API_GROUPS, trimRootPath(name)) - if err := client.apiDelete(uri, nil, version); err != nil { - return nil, err - } - return version, nil -} - -// Update the parameters of a groups -// name: the identifier for the group -// group: the group structure with the new params -func (client *Client) UpdateGroup(name string, group *Group) (*DeploymentID, error) { - deploymentId := new(DeploymentID) - uri := fmt.Sprintf("%s/%s", MARATHON_API_GROUPS, trimRootPath(name)) - if err := client.apiPut(uri, group, deploymentId); err != nil { - return nil, err - } - return deploymentId, nil -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/group_test.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/group_test.go deleted file mode 100644 index fcf02deda..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/group_test.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package marathon - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGroups(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - groups, err := client.Groups() - assert.Nil(t, err) - assert.NotNil(t, groups) - assert.Equal(t, len(groups.Groups), 1) - group := groups.Groups[0] - assert.Equal(t, group.ID, FAKE_GROUP_NAME) -} - -func TestGroup(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - group, err := client.Group(FAKE_GROUP_NAME) - assert.Nil(t, err) - assert.NotNil(t, group) - assert.Equal(t, len(group.Apps), 1) - assert.Equal(t, group.ID, FAKE_GROUP_NAME) - - group, err = client.Group(FAKE_GROUP_NAME_1) - - assert.Nil(t, err) - assert.NotNil(t, group) - assert.Equal(t, group.ID, FAKE_GROUP_NAME_1) - assert.NotNil(t, group.Groups) - assert.Equal(t, len(group.Groups), 1) - - frontend := group.Groups[0] - assert.Equal(t, frontend.ID, "frontend") - assert.Equal(t, len(frontend.Apps), 3) - for _, app := range frontend.Apps { - assert.NotNil(t, app.Container) - assert.NotNil(t, app.Container.Docker) - assert.Equal(t, app.Container.Docker.Network, "BRIDGE") - if len(app.Container.Docker.PortMappings) <= 0 { - t.Fail() - } - } -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/health.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/health.go deleted file mode 100644 index 6671fdeb3..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/health.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -type HealthCheck struct { - Command *Command `json:"command,omitempty"` - Protocol string `json:"protocol,omitempty"` - Path string `json:"path,omitempty"` - GracePeriodSeconds int `json:"gracePeriodSeconds,omitempty"` - IntervalSeconds int `json:"intervalSeconds,omitempty"` - PortIndex int `json:"portIndex,omitempty"` - MaxConsecutiveFailures int `json:"maxConsecutiveFailures,omitempty"` - TimeoutSeconds int `json:"timeoutSeconds,omitempty"` -} - -func NewDefaultHealthCheck() *HealthCheck { - return &HealthCheck{ - Protocol: "HTTP", - Path: "", - GracePeriodSeconds: 30, - IntervalSeconds: 10, - PortIndex: 0, - MaxConsecutiveFailures: 3, - TimeoutSeconds: 5, - } -} - -type HealthCheckResult struct { - Alive bool `json:"alive"` - ConsecutiveFailures int `json:"consecutiveFailures"` - FirstSuccess string `json:"firstSuccess"` - LastFailure string `json:"lastFailure"` - LastSuccess string `json:"lastSuccess"` - TaskID string `json:"taskId"` -} - -type Command struct { - Value string `json:"value"` -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/info.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/info.go deleted file mode 100644 index 9b305569f..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/info.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -type Info struct { - EventSubscriber struct { - HttpEndpoints []string `json:"http_endpoints"` - Type string `json:"type"` - } `json:"event_subscriber"` - FrameworkId string `json:"frameworkId"` - HttpConfig struct { - AssetsPath interface{} `json:"assets_path"` - HttpPort float64 `json:"http_port"` - HttpsPort float64 `json:"https_port"` - } `json:"http_config"` - Leader string `json:"leader"` - MarathonConfig struct { - Checkpoint bool `json:"checkpoint"` - Executor string `json:"executor"` - FailoverTimeout float64 `json:"failover_timeout"` - Ha bool `json:"ha"` - Hostname string `json:"hostname"` - LocalPortMax float64 `json:"local_port_max"` - LocalPortMin float64 `json:"local_port_min"` - Master string `json:"master"` - MesosRole string `json:"mesos_role"` - MesosUser string `json:"mesos_user"` - ReconciliationInitialDelay float64 `json:"reconciliation_initial_delay"` - ReconciliationInterval float64 `json:"reconciliation_interval"` - TaskLaunchTimeout float64 `json:"task_launch_timeout"` - } `json:"marathon_config"` - Name string `json:"name"` - Version string `json:"version"` - ZookeeperConfig struct { - Zk string `json:"zk"` - ZkFutureTimeout struct { - Duration float64 `json:"duration"` - } `json:"zk_future_timeout"` - ZkHosts string `json:"zk_hosts"` - ZkPath string `json:"zk_path"` - ZkState string `json:"zk_state"` - ZkTimeout float64 `json:"zk_timeout"` - } `json:"zookeeper_config"` -} - -func (client *Client) Info() (*Info, error) { - info := new(Info) - if err := client.apiGet(MARATHON_API_INFO, nil, info); err != nil { - return nil, err - } else { - return info, nil - } -} - -func (client *Client) Leader() (string, error) { - var leader struct { - Leader string `json:"leader"` - } - if err := client.apiGet(MARATHON_API_LEADER, nil, &leader); err != nil { - return "", err - } else { - return leader.Leader, nil - } -} - -func (client *Client) AbdicateLeader() (string, error) { - message := new(Message) - if err := client.apiDelete(MARATHON_API_LEADER, nil, message); err != nil { - return "", err - } else { - return message.Message, err - } -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/info_test.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/info_test.go deleted file mode 100644 index 4a71fba57..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/info_test.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package marathon - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestInfo(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - info, err := client.Info() - assert.Nil(t, err) - assert.Equal(t, info.FrameworkId, "20140730-222531-1863654316-5050-10422-0000") - assert.Equal(t, info.Leader, "127.0.0.1:8080") - assert.Equal(t, info.Version, "0.7.0-SNAPSHOT") -} - -func TestLeader(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - leader, err := client.Leader() - assert.Nil(t, err) - assert.Equal(t, leader, "127.0.0.1:8080") -} - -func TestAbdicateLeader(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - message, err := client.AbdicateLeader() - assert.Nil(t, err) - assert.Equal(t, message, "Leadership abdicted") -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/last_task_failure.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/last_task_failure.go deleted file mode 100644 index d0e770f38..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/last_task_failure.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -type LastTaskFailure struct { - AppId string `json:"appId,omitempty"` - Host string `json:"host,omitempty"` - Message string `json:"message,omitempty"` - State string `json:"state,omitempty"` - TaskId string `json:"taskId,omitempty"` - Timestamp string `json:"timestamp,omitempty"` - Version string `json:"version,omitempty"` -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/subscription.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/subscription.go deleted file mode 100644 index 52696a9ab..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/subscription.go +++ /dev/null @@ -1,261 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package marathon
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"net/http"
-	"strings"
-	"time"
-)
-
-type Subscriptions struct {
-	CallbackURLs []string `json:"callbackUrls"`
-}
-
-// Retrieve a list of registered subscriptions
-func (client *Client) Subscriptions() (*Subscriptions, error) {
-	subscriptions := new(Subscriptions)
-	if err := client.apiGet(MARATHON_API_SUBSCRIPTION, nil, subscriptions); err != nil {
-		return nil, err
-	} else {
-		return subscriptions, nil
-	}
-}
-
-// Add yourself as a listener to events from Marathon
-// channel: an EventsChannel used to receive events on
-func (client *Client) AddEventsListener(channel EventsChannel, filter int) error {
-	client.Lock()
-	defer client.Unlock()
-	// step: someone has asked to start listening to events, we need to register for events
-	// if we haven't done so already
-	if err := client.RegisterSubscription(); err != nil {
-		return err
-	}
-
-	if _, found := client.listeners[channel]; !found {
-		client.log("AddEventsListener() Adding a watch for events: %d, channel: %v", filter, channel)
-		client.listeners[channel] = filter
-	}
-	return nil
-}
-
-// Remove the channel from the events listeners
-// channel: the channel you are removing
-func (client *Client) RemoveEventsListener(channel EventsChannel) {
-	client.Lock()
-	defer client.Unlock()
-	if _, found := client.listeners[channel]; found {
-		delete(client.listeners, channel)
-		/* step: if there is no one listening anymore, let's remove ourselves
-		from the events callback */
-		if len(client.listeners) <= 0 {
-			client.UnSubscribe()
-		}
-	}
-}
-
-// Retrieve the subscription callback URL used when registering
-func (client *Client) SubscriptionURL() string {
-	return fmt.Sprintf("http://%s:%d%s", client.ipaddress, client.config.EventsPort, DEFAULT_EVENTS_URL)
-}
-
-// Register ourselves with Marathon to receive events from its callback facility
-func (client *Client) RegisterSubscription() error {
-	if client.events_http == nil {
-		if ip_address, err := getInterfaceAddress(client.config.EventsInterface); err != nil {
-			return errors.New(fmt.Sprintf("Unable to get the ip address from the interface: %s, error: %s",
-				client.config.EventsInterface, err))
-		} else {
-			// step: set the ip address
-			client.ipaddress = ip_address
-			binding := fmt.Sprintf("%s:%d", ip_address, client.config.EventsPort)
-			// step: register the handler
-			http.HandleFunc(DEFAULT_EVENTS_URL, client.HandleMarathonEvent)
-			// step: create the http server
-			client.events_http = &http.Server{
-				Addr:           binding,
-				Handler:        nil,
-				ReadTimeout:    10 * time.Second,
-				WriteTimeout:   10 * time.Second,
-				MaxHeaderBytes: 1 << 20,
-			}
-			client.log("RegisterSubscription() Attempting to listen on binding: %s", binding)
-
-			// @todo need to add a timeout value here
-			listener, err := net.Listen("tcp", binding)
-			if err != nil {
-				return err
-			}
-
-			client.log("RegisterSubscription() Starting to listen on http events service")
-			go func() {
-				for {
-					client.events_http.Serve(listener)
-					client.log("RegisterSubscription() Exited the http events service")
-				}
-			}()
-		}
-	}
-
-	// step: get the callback url
-	callback := client.SubscriptionURL()
-
-	// step: check if the callback is registered
-	client.log("RegisterSubscription() Checking if we already have a subscription for callback %s", callback)
-	if found, err := client.HasSubscription(callback); err != nil {
-		return err
-	} else if !found {
-		client.log("RegisterSubscription() Registering a subscription with Marathon: callback: %s",
callback) - // step: we need to register our self - uri := fmt.Sprintf("%s?callbackUrl=%s", MARATHON_API_SUBSCRIPTION, callback) - if err := client.apiPost(uri, "", nil); err != nil { - return err - } - } else { - client.log("RegisterSubscription() A subscription already exists for this callback: %s", callback) - } - return nil -} - -// Remove ourselves from Marathon's callback facility -func (client *Client) UnSubscribe() error { - /* step: remove from the list of subscriptions */ - return client.apiDelete(fmt.Sprintf("%s?callbackUrl=%s", MARATHON_API_SUBSCRIPTION, client.SubscriptionURL()), nil, nil) -} - -// Check to see a subscription already exists with Marathon -// callback: the url of the callback -func (client *Client) HasSubscription(callback string) (bool, error) { - client.log("HasSubscription() Checking for subscription: %s", callback) - /* step: generate our events callback */ - if subscriptions, err := client.Subscriptions(); err != nil { - return false, err - } else { - for _, subscription := range subscriptions.CallbackURLs { - if callback == subscription { - return true, nil - } - } - } - return false, nil -} - -func (client *Client) HandleMarathonEvent(writer http.ResponseWriter, request *http.Request) { - body, err := ioutil.ReadAll(request.Body) - if err != nil { - client.log("HandleMarathonEvent() Failed to decode the event type, content: %s, error: %s") - return - } - - // step: process the event and decode the event - content := string(body[:]) - event_type := new(EventType) - err = json.NewDecoder(strings.NewReader(content)).Decode(event_type) - if err != nil { - client.log("HandleMarathonEvent() Failed to decode the event type, content: %s, error: %s", content, err) - return - } - - client.log("HandleMarathonEvent() Recieved marathon event, %s", event_type.EventType) - - // step: check the type is handled - event, err := client.GetEvent(event_type.EventType) - if err != nil { - client.log("HandleMarathonEvent() Unable to retrieve the event, type: %s", event_type.EventType) - return - } - - // step: lets decode message - err = json.NewDecoder(strings.NewReader(content)).Decode(event.Event) - if err != nil { - client.log("HandleMarathonEvent() Failed to decode the event type: %d, name: %s error: %s", event.ID, err) - return - } - - client.log("HandleMarathonEvent() Decoded the marathon event, %s", event) - - client.RLock() - defer client.RUnlock() - - // step: check if anyone is listen for this event - for channel, filter := range client.listeners { - // step: check if this listener wants this event type - client.log("HandleMarathonEvent() checking: channel: %v, type: %d, filter: %d", channel, event.ID, filter) - if event.ID&filter != 0 { - client.log("HandleMarathonEvent() Event type: %d being listened to, sending to listener: %v", event.ID, channel) - go func(ch EventsChannel, e *Event) { - ch <- e - }(channel, event) - } else { - client.log("HandleMarathonEvent() Event type: %d is not being listened to by listener: %v", event.ID, channel) - } - } -} - -func (client *Client) GetEvent(name string) (*Event, error) { - // step: check it's supported - if id, found := Events[name]; found { - event := new(Event) - event.ID = id - event.Name = name - switch name { - case "api_post_event": - event.Event = new(EventAPIRequest) - case "status_update_event": - event.Event = new(EventStatusUpdate) - case "framework_message_event": - event.Event = new(EventFrameworkMessage) - case "subscribe_event": - event.Event = new(EventSubscription) - case "unsubscribe_event": - 
event.Event = new(EventUnsubscription) - case "add_health_check_event": - event.Event = new(EventAddHealthCheck) - case "remove_health_check_event": - event.Event = new(EventRemoveHealthCheck) - case "failed_health_check_event": - event.Event = new(EventFailedHealthCheck) - case "health_status_changed_event": - event.Event = new(EventHealthCheckChanged) - case "group_change_success": - event.Event = new(EventGroupChangeSuccess) - case "group_change_failed": - event.Event = new(EventGroupChangeFailed) - case "deployment_success": - event.Event = new(EventDeploymentSuccess) - case "deployment_failed": - event.Event = new(EventDeploymentFailed) - case "deployment_info": - event.Event = new(EventDeploymentInfo) - case "deployment_step_success": - event.Event = new(EventDeploymentStepSuccess) - case "deployment_step_failure": - event.Event = new(EventDeploymentStepFailure) - case "app_terminated_event": - event.Event = new(EventAppTerminated) - } - return event, nil - } else { - return nil, errors.New(fmt.Sprintf("The event type: %d was not found or supported", name)) - } -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/subscription_test.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/subscription_test.go deleted file mode 100644 index 348739fe0..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/subscription_test.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestSubscriptions(t *testing.T) { - client := NewFakeMarathonEndpoint(t) - sub, err := client.Subscriptions() - assert.Nil(t, err) - assert.NotNil(t, sub) - assert.NotNil(t, sub.CallbackURLs) - assert.Equal(t, len(sub.CallbackURLs), 1) -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/task.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/task.go deleted file mode 100644 index b20f6d5da..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/task.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/
-
-package marathon
-
-import (
-	"fmt"
-	"net/url"
-	"strconv"
-	"strings"
-)
-
-type Tasks struct {
-	Tasks []Task `json:"tasks"`
-}
-
-type Task struct {
-	ID                string               `json:"id"`
-	AppID             string               `json:"appId"`
-	Host              string               `json:"host"`
-	HealthCheckResult []*HealthCheckResult `json:"healthCheckResults"`
-	Ports             []int                `json:"ports"`
-	ServicePorts      []int                `json:"servicePorts"`
-	StagedAt          string               `json:"stagedAt"`
-	StartedAt         string               `json:"startedAt"`
-	Version           string               `json:"version"`
-}
-
-func (task Task) String() string {
-	return fmt.Sprintf("id: %s, application: %s, host: %s, ports: %v, created: %s",
-		task.ID, task.AppID, task.Host, task.Ports, task.StartedAt)
-}
-
-// HasHealthCheckResults checks whether the task has any health check results yet
-func (task *Task) HasHealthCheckResults() bool {
-	return len(task.HealthCheckResult) > 0
-}
-
-// AllTasks retrieves all the tasks currently running
-func (client *Client) AllTasks() (*Tasks, error) {
-	tasks := new(Tasks)
-	if err := client.apiGet(MARATHON_API_TASKS, nil, tasks); err != nil {
-		return nil, err
-	}
-	return tasks, nil
-}
-
-// Tasks retrieves the list of tasks for an application
-// id: the id of the application
-func (client *Client) Tasks(id string) (*Tasks, error) {
-	tasks := new(Tasks)
-	if err := client.apiGet(fmt.Sprintf("%s/%s/tasks", MARATHON_API_APPS, trimRootPath(id)), nil, tasks); err != nil {
-		return nil, err
-	}
-	return tasks, nil
-}
-
-// ListTasks retrieves an array of task ids currently running in marathon
-func (client *Client) ListTasks() ([]string, error) {
-	tasks, err := client.AllTasks()
-	if err != nil {
-		return nil, err
-	}
-	list := make([]string, 0)
-	for _, task := range tasks.Tasks {
-		list = append(list, task.ID)
-	}
-	return list, nil
-}
-
-// KillApplicationTasks kills all tasks relating to an application
-// id: the id of the application
-// hostname: kill only those tasks on a specific host (optional)
-// scale: scale the app down (i.e. decrement its instances setting by the number of tasks killed) after killing the specified tasks
-func (client *Client) KillApplicationTasks(id, hostname string, scale bool) (*Tasks, error) {
-	var options struct {
-		Host  string `json:"host"`
-		Scale bool   `json:"scale"`
-	}
-	options.Host = hostname
-	options.Scale = scale
-	tasks := new(Tasks)
-	client.log("KillApplicationTasks() Killing application tasks for: %s, hostname: %s, scale: %t", id, hostname, scale)
-	if err := client.apiDelete(fmt.Sprintf("%s/%s/tasks", MARATHON_API_APPS, trimRootPath(id)), &options, tasks); err != nil {
-		return nil, err
-	}
-	return tasks, nil
-}
-
-// KillTask kills the task associated with a given ID
-// taskId: the id of the task
-// scale: scale the app down after killing the task
-func (client *Client) KillTask(taskId string, scale bool) (*Task, error) {
-	var options struct {
-		Scale bool `json:"scale"`
-	}
-	options.Scale = scale
-	task := new(Task)
-	appName := taskId[0:strings.LastIndex(taskId, ".")]
-	client.log("KillTask() Killing task `%s` for: %s, scale: %t", taskId, appName, scale)
-	if err := client.apiDelete(fmt.Sprintf("%s/%s/tasks/%s", MARATHON_API_APPS, appName, taskId), &options, task); err != nil {
-		return nil, err
-	}
-	return task, nil
-}
-
-// KillTasks kills the tasks associated with the given array of ids
-// tasks: the array of task ids
-// scale: scale the app down after killing the tasks
-func (client *Client) KillTasks(tasks []string, scale bool) error {
-	v := url.Values{}
-	v.Add("scale", strconv.FormatBool(scale))
-	var post struct {
-		TaskIDs []string `json:"ids"`
-	}
-	post.TaskIDs = tasks
-	client.log("KillTasks() Killing %d tasks", len(tasks))
-	return client.apiPost(fmt.Sprintf("%s/delete?%s", MARATHON_API_TASKS, v.Encode()), &post, nil)
-}
-
-// TaskEndpoints gets the endpoints, i.e. HOST_IP:DYNAMIC_PORT, for a specific application service.
-// For example, a container running apache might expose ports 80/443 (each translated to a dynamic port),
-// but we only want port 80, and only from those tasks which have passed their health checks.
-//
-// Note: there is presently no way to associate a health check result with a specific port; given that
-// marathon will fail and restart an application even if only one of a task's ports is down, a per-port
-// check would be redundant anyway.
-//
-// name: the identifier for the application
-// port: the container port you are interested in
-// health_check: whether to filter out tasks which have not passed their health checks
-func (client *Client) TaskEndpoints(name string, port int, health_check bool) ([]string, error) {
-	/* step: get the application details */
-	if application, err := client.Application(name); err != nil {
-		return nil, err
-	} else {
-		/* step: we need to get the port index of the service we are interested in */
-		if port_index, err := application.Container.Docker.ServicePortIndex(port); err != nil {
-			return nil, err
-		} else {
-			list := make([]string, 0)
-			/* step: do we have any tasks? */
-			if len(application.Tasks) == 0 {
-				return list, nil
-			}
-
-			/* step: iterate the tasks and extract the dynamic ports */
-			for _, task := range application.Tasks {
-				/* step: if we are checking health, does the 'service' have a health check? */
-				if health_check && application.HasHealthChecks() {
-					/*
-						check: does the task have a health check result? if NOT, the health of the
-						task hasn't been assessed yet, so we treat it as DOWN
-					*/
-					if !task.HasHealthCheckResults() {
-						client.log("TaskEndpoints() The task: %s for application: %s hasn't been checked yet, skipping", task, application)
-						continue
-					}
-
-					/* step: check the health results then */
-					skip_endpoint := false
-					for _, health := range task.HealthCheckResult {
-						if !health.Alive {
-							client.log("TaskEndpoints() The task: %s for application: %s failed health checks", task, application)
-							skip_endpoint = true
-						}
-					}
-
-					if skip_endpoint {
-						continue
-					}
-				}
-				/* else we can just add it */
-				list = append(list, fmt.Sprintf("%s:%d", task.Host, task.Ports[port_index]))
-			}
-			return list, nil
-		}
-	}
-}
diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/task_test.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/task_test.go
deleted file mode 100644
index 301d5f762..000000000
--- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/task_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
-Copyright 2014 Rohith All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package marathon
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestAllTasks(t *testing.T) {
-	client := NewFakeMarathonEndpoint(t)
-	tasks, err := client.AllTasks()
-	assert.Nil(t, err)
-	assert.NotNil(t, tasks)
-	assert.Equal(t, len(tasks.Tasks), 2)
-}
-
-func TestTaskEndpoints(t *testing.T) {
-	client := NewFakeMarathonEndpoint(t)
-
-	endpoints, err := client.TaskEndpoints(FAKE_APP_NAME_BROKEN, 8080, true)
-	assert.Nil(t, err)
-	assert.NotNil(t, endpoints)
-	assert.Equal(t, len(endpoints), 1)
-	assert.Equal(t, endpoints[0], "10.141.141.10:31045")
-
-	endpoints, err = client.TaskEndpoints(FAKE_APP_NAME_BROKEN, 8080, false)
-	assert.Nil(t, err)
-	assert.NotNil(t, endpoints)
-	assert.Equal(t, len(endpoints), 2)
-	assert.Equal(t, endpoints[0], "10.141.141.10:31045")
-	assert.Equal(t, endpoints[1], "10.141.141.10:31234")
-
-	endpoints, err = client.TaskEndpoints(FAKE_APP_NAME_BROKEN, 80, true)
-	assert.NotNil(t, err)
-}
-
-func TestKillApplicationTasks(t *testing.T) {
-	client := NewFakeMarathonEndpoint(t)
-	tasks, err := client.KillApplicationTasks(FAKE_APP_NAME, "", false)
-	assert.Nil(t, err)
-	assert.NotNil(t, tasks)
-}
diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/testing_test.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/testing_test.go
deleted file mode 100644
index 31b2395bf..000000000
--- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/testing_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
-Copyright 2014 Rohith All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package marathon
-
-import (
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"sync"
-	"testing"
-
-	yaml "gopkg.in/yaml.v2"
-)
-
-const (
-	FAKE_MARATHON_URL    = "http://127.0.0.1:3000,127.0.0.1:3000"
-	FAKE_GROUP_NAME      = "/test"
-	FAKE_GROUP_NAME_1    = "/qa/product/1"
-	FAKE_APP_NAME        = "/fake_app"
-	FAKE_APP_NAME_BROKEN = "/fake_app_broken"
-	FAKE_DEPLOYMENT_ID   = "867ed450-f6a8-4d33-9b0e-e11c5513990b"
-	FAKE_API_FILENAME    = "./tests/rest-api/methods.yml"
-	FAKE_API_PORT        = 3000
-)
-
-type RestMethod struct {
-	// the uri of the method
-	URI string `yaml:"uri,omitempty"`
-	// the http method type (GET|PUT etc)
-	Method string `yaml:"method,omitempty"`
-	// the content i.e. the response
-	Content string `yaml:"content,omitempty"`
-}
-
-var testClient struct {
-	sync.Once
-	client Marathon
-}
-
-func NewFakeMarathonEndpoint(t *testing.T) Marathon {
-	testClient.Once.Do(func() {
-		// step: open and read in the methods yaml
-		contents, err := ioutil.ReadFile(FAKE_API_FILENAME)
-		if err != nil {
-			t.Fatalf("unable to read in the methods yaml file: %s", FAKE_API_FILENAME)
-		}
-		// step: unmarshal the yaml
-		var methods []*RestMethod
-		err = yaml.Unmarshal([]byte(contents), &methods)
-		if err != nil {
-			t.Fatalf("Unable to unmarshal the methods yaml, error: %s", err)
-		}
-
-		// step: construct a hash from the methods
-		uris := make(map[string]*string)
-		for _, method := range methods {
-			uris[fmt.Sprintf("%s:%s", method.Method, method.URI)] = &method.Content
-		}
-
-		http.HandleFunc("/", func(writer http.ResponseWriter, reader *http.Request) {
-			key := fmt.Sprintf("%s:%s", reader.Method, reader.RequestURI)
-			if content, found := uris[key]; found {
-				writer.Header().Add("Content-Type", "application/json")
-				writer.Write([]byte(*content))
-			}
-		})
-
-		go http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", FAKE_API_PORT), nil)
-
-		config := NewDefaultConfig()
-		config.URL = FAKE_MARATHON_URL
-		//config.LogOutput = os.Stdout
-		if testClient.client, err = NewClient(config); err != nil {
-			t.Fatalf("Failed to create the fake client, %s, error: %s", FAKE_MARATHON_URL, err)
-		}
-	})
-	return testClient.client
-}
diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/Gemfile b/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/Gemfile
deleted file mode 100644
index e03a992ef..000000000
--- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/Gemfile
+++ /dev/null
@@ -1,4 +0,0 @@
-source 'https://rubygems.org'
-
-gem 'sinatra'
-gem 'thin'
diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/config.ru b/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/config.ru
deleted file mode 100644
index 75bd91149..000000000
--- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/config.ru
+++ /dev/null
@@ -1,7 +0,0 @@
-$:.unshift File.join(File.dirname(__FILE__),'.')
-
-require 'rack'
-require 'sinatra'
-require 'rest-api'
-
-run RestAPI::Application
diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/main.go
b/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/main.go deleted file mode 100644 index e91c626fa..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/main.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "net/http" - "os" - - "github.com/golang/glog" - yaml "gopkg.in/yaml.v2" -) - -type RestMethod struct { - // the uri of the method - URI string `yaml:"uri,omitempty"` - // the http method type (GET|PUT etc) - Method string `yaml:"method,omitempty"` - // the content i.e. response - Content string `yaml:"content,omitempty"` -} - -var Options struct { - // the filename containing the methods - method_file string - // port we should be listening on - port int -} - -func init() { - flag.StringVar(&Options.method_file, "methods", "methods.yml", "the name of the file containing the methods") - flag.IntVar(&Options.port, "port", 3000, "the port we should be listening on") -} - -func main() { - flag.Parse() - if Options.method_file == "" { - glog.Errorf("You have not specified the methods file to import") - os.Exit(1) - } - - // step: open and read in the methods yaml - contents, err := ioutil.ReadFile(Options.method_file) - if err != nil { - glog.Errorf("Failed to open|read the methods file: %s, error: %s", Options.method_file, err) - os.Exit(1) - } - - // step: unmarshal the yaml - var methods []*RestMethod - err = yaml.Unmarshal([]byte(contents), &methods) - if err != nil { - glog.Errorf("Failed to unmarshall the method yaml file: %s, error: %s", Options.method_file, err) - os.Exit(1) - } - - // step: construct a hash from the methods - uris := make(map[string]*string, 0) - for _, method := range methods { - uris[fmt.Sprintf("%s:%s", method.Method, method.URI)] = &method.Content - } - - http.HandleFunc("/", func(writer http.ResponseWriter, reader *http.Request) { - key := fmt.Sprintf("%s:%s", reader.Method, reader.RequestURI) - glog.V(4).Infof("Request: uri: %s, method: %s", reader.RequestURI, reader.Method) - if content, found := uris[key]; found { - glog.V(4).Infof("Content: %s", *content) - writer.Header().Add("Content-Type", "application/json") - writer.Write([]byte(*content)) - } - }) - glog.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", Options.port), nil)) -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/methods.yml b/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/methods.yml deleted file mode 100644 index f61b96dd1..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/methods.yml +++ /dev/null @@ -1,902 +0,0 @@ -- uri: /ping - method: GET - content: | - pong -- uri: /v2/apps/fake_app/versions - method: GET - content: | - { - "versions": [ - "2014-04-04T06:25:31.399Z" - ] - } -- uri: /v2/apps/fake_app - method: PUT - content: | - { - "deploymentId": "83b215a6-4e26-4e44-9333-5c385eda6438", - "version": "2014-04-04T06:25:31.399Z" 
- } -- uri: /v2/apps - method: POST - content: | - { - "args": null, - "backoffFactor": 1.15, - "backoffSeconds": 1, - "maxLaunchDelaySeconds": 3600, - "cmd": "env && python3 -m http.server $PORT0", - "constraints": [ - [ - "hostname", - "UNIQUE" - ] - ], - "container": { - "docker": { - "image": "python:3" - }, - "type": "DOCKER", - "volumes": [] - }, - "cpus": 0.25, - "dependencies": [], - "deployments": [ - { - "id": "f44fd4fc-4330-4600-a68b-99c7bd33014a" - } - ], - "disk": 0.0, - "env": {}, - "executor": "", - "healthChecks": [ - { - "command": null, - "gracePeriodSeconds": 3, - "intervalSeconds": 10, - "maxConsecutiveFailures": 3, - "path": "/", - "portIndex": 0, - "protocol": "HTTP", - "timeoutSeconds": 5 - } - ], - "id": "/fake_app", - "instances": 2, - "mem": 50.0, - "ports": [ - 0 - ], - "requirePorts": false, - "storeUrls": [], - "upgradeStrategy": { - "minimumHealthCapacity": 0.5, - "maximumOverCapacity": 0.5 - }, - "uris": [], - "user": null, - "version": "2014-08-18T22:36:41.451Z" - } -- uri: /v2/apps - method: GET - content: | - { - "apps": [ - { - "args": null, - "backoffFactor": 1.15, - "backoffSeconds": 1, - "cmd": "python3 -m http.server 8080", - "constraints": [], - "container": { - "docker": { - "image": "python:3", - "network": "BRIDGE", - "portMappings": [ - { - "containerPort": 8080, - "hostPort": 0, - "servicePort": 9000, - "protocol": "tcp" - }, - { - "containerPort": 161, - "hostPort": 0, - "protocol": "udp" - } - ] - }, - "type": "DOCKER", - "volumes": [] - }, - "cpus": 0.5, - "dependencies": [], - "deployments": [], - "disk": 0.0, - "env": {}, - "executor": "", - "healthChecks": [ - { - "command": null, - "gracePeriodSeconds": 5, - "intervalSeconds": 20, - "maxConsecutiveFailures": 3, - "path": "/", - "portIndex": 0, - "protocol": "HTTP", - "timeoutSeconds": 20 - } - ], - "id": "/fake_app", - "instances": 2, - "mem": 64.0, - "ports": [ - 10000, - 10001 - ], - "requirePorts": false, - "storeUrls": [], - "tasksRunning": 2, - "tasksStaged": 0, - "upgradeStrategy": { - "minimumHealthCapacity": 1.0 - }, - "uris": [], - "user": null, - "version": "2014-09-25T02:26:59.256Z" - }, - { - "args": null, - "backoffFactor": 1.15, - "backoffSeconds": 1, - "cmd": "python3 -m http.server 8080", - "constraints": [], - "container": { - "docker": { - "image": "python:3", - "network": "BRIDGE", - "portMappings": [ - { - "containerPort": 8080, - "hostPort": 0, - "servicePort": 9000, - "protocol": "tcp" - }, - { - "containerPort": 161, - "hostPort": 0, - "protocol": "udp" - } - ] - }, - "type": "DOCKER", - "volumes": [] - }, - "cpus": 0.5, - "dependencies": [], - "deployments": [], - "disk": 0.0, - "env": {}, - "executor": "", - "healthChecks": [ - { - "command": null, - "gracePeriodSeconds": 5, - "intervalSeconds": 20, - "maxConsecutiveFailures": 3, - "path": "/", - "portIndex": 0, - "protocol": "HTTP", - "timeoutSeconds": 20 - } - ], - "id": "/fake_app_broken", - "instances": 2, - "mem": 64.0, - "ports": [ - 10000, - 10001 - ], - "requirePorts": false, - "storeUrls": [], - "tasksRunning": 2, - "tasksStaged": 0, - "upgradeStrategy": { - "minimumHealthCapacity": 1.0 - }, - "uris": [], - "user": null, - "version": "2014-09-25T02:26:59.256Z" - } - ] - } -- uri: /v2/apps/fake_app/tasks - method: DELETE - content: | - { - "tasks": [] - } -- uri: /v2/apps/fake_app - method: GET - content: | - { - "app": { - "args": null, - "backoffFactor": 1.15, - "backoffSeconds": 1, - "cmd": "python toggle.py $PORT0", - "constraints": [], - "container": { - "docker": { - "image": "python:3", - "network": 
"BRIDGE", - "portMappings": [ - { - "containerPort": 8080, - "hostPort": 0, - "servicePort": 9000, - "protocol": "tcp" - } - ] - }, - "type": "DOCKER", - "volumes": [] - }, - "cpus": 0.2, - "dependencies": [], - "deployments": [], - "disk": 0.0, - "env": {}, - "executor": "", - "healthChecks": [ - { - "command": null, - "gracePeriodSeconds": 5, - "intervalSeconds": 10, - "maxConsecutiveFailures": 3, - "path": "/health", - "portIndex": 0, - "protocol": "HTTP", - "timeoutSeconds": 10 - } - ], - "id": "/fake_app", - "instances": 2, - "lastTaskFailure": { - "appId": "/toggle", - "host": "10.141.141.10", - "message": "Abnormal executor termination", - "state": "TASK_FAILED", - "taskId": "toggle.cc427e60-5046-11e4-9e34-56847afe9799", - "timestamp": "2014-09-12T23:23:41.711Z", - "version": "2014-09-12T23:28:21.737Z" - }, - "mem": 32.0, - "ports": [ - 10000 - ], - "requirePorts": false, - "storeUrls": [], - "tasks": [ - { - "appId": "/toggle", - "healthCheckResults": [ - { - "alive": true, - "consecutiveFailures": 0, - "firstSuccess": "2014-09-13T00:20:28.101Z", - "lastFailure": null, - "lastSuccess": "2014-09-13T00:25:07.506Z", - "taskId": "toggle.802df2ae-3ad4-11e4-a400-56847afe9799" - } - ], - "host": "10.141.141.10", - "id": "toggle.802df2ae-3ad4-11e4-a400-56847afe9799", - "ports": [ - 31045 - ], - "stagedAt": "2014-09-12T23:28:28.594Z", - "startedAt": "2014-09-13T00:24:46.959Z", - "version": "2014-09-12T23:28:21.737Z" - }, - { - "appId": "/toggle", - "healthCheckResults": [ - { - "alive": true, - "consecutiveFailures": 0, - "firstSuccess": "2014-09-13T00:20:28.101Z", - "lastFailure": null, - "lastSuccess": "2014-09-13T00:25:07.508Z", - "taskId": "toggle.7c99814d-3ad4-11e4-a400-56847afe9799" - } - ], - "host": "10.141.141.10", - "id": "toggle.7c99814d-3ad4-11e4-a400-56847afe9799", - "ports": [ - 31234 - ], - "stagedAt": "2014-09-12T23:28:22.587Z", - "startedAt": "2014-09-13T00:24:46.965Z", - "version": "2014-09-12T23:28:21.737Z" - } - ], - "tasksRunning": 2, - "tasksStaged": 0, - "upgradeStrategy": { - "minimumHealthCapacity": 1.0 - }, - "uris": [ - "http://downloads.mesosphere.com/misc/toggle.tgz" - ], - "user": null, - "version": "2014-09-12T23:28:21.737Z" - } - } -- uri: /v2/apps/fake_app_broken - method: GET - content: | - { - "app": { - "args": null, - "backoffFactor": 1.15, - "backoffSeconds": 1, - "cmd": "python toggle.py $PORT0", - "constraints": [], - "container": { - "docker": { - "image": "python:3", - "network": "BRIDGE", - "portMappings": [ - { - "containerPort": 8080, - "hostPort": 0, - "servicePort": 9000, - "protocol": "tcp" - } - ] - }, - "type": "DOCKER", - "volumes": [] - }, - "cpus": 0.2, - "dependencies": [], - "deployments": [], - "disk": 0.0, - "env": {}, - "executor": "", - "healthChecks": [ - { - "command": null, - "gracePeriodSeconds": 5, - "intervalSeconds": 10, - "maxConsecutiveFailures": 3, - "path": "/health", - "portIndex": 0, - "protocol": "HTTP", - "timeoutSeconds": 10 - } - ], - "id": "/fake_app_broken", - "instances": 2, - "lastTaskFailure": { - "appId": "/toggle", - "host": "10.141.141.10", - "message": "Abnormal executor termination", - "state": "TASK_FAILED", - "taskId": "toggle.cc427e60-5046-11e4-9e34-56847afe9799", - "timestamp": "2014-09-12T23:23:41.711Z", - "version": "2014-09-12T23:28:21.737Z" - }, - "mem": 32.0, - "ports": [ - 10000 - ], - "requirePorts": false, - "storeUrls": [], - "tasks": [ - { - "appId": "/toggle", - "healthCheckResults": [ - { - "alive": true, - "consecutiveFailures": 0, - "firstSuccess": "2014-09-13T00:20:28.101Z", - 
"lastFailure": null, - "lastSuccess": "2014-09-13T00:25:07.506Z", - "taskId": "toggle.802df2ae-3ad4-11e4-a400-56847afe9799" - } - ], - "host": "10.141.141.10", - "id": "toggle.802df2ae-3ad4-11e4-a400-56847afe9799", - "ports": [ - 31045 - ], - "stagedAt": "2014-09-12T23:28:28.594Z", - "startedAt": "2014-09-13T00:24:46.959Z", - "version": "2014-09-12T23:28:21.737Z" - }, - { - "appId": "/toggle", - "healthCheckResults": [ - { - "alive": false, - "consecutiveFailures": 0, - "firstSuccess": "2014-09-13T00:20:28.101Z", - "lastFailure": null, - "lastSuccess": "2014-09-13T00:25:07.508Z", - "taskId": "toggle.7c99814d-3ad4-11e4-a400-56847afe9799" - } - ], - "host": "10.141.141.10", - "id": "toggle.7c99814d-3ad4-11e4-a400-56847afe9799", - "ports": [ - 31234 - ], - "stagedAt": "2014-09-12T23:28:22.587Z", - "startedAt": "2014-09-13T00:24:46.965Z", - "version": "2014-09-12T23:28:21.737Z" - } - ], - "tasksRunning": 2, - "tasksStaged": 0, - "upgradeStrategy": { - "minimumHealthCapacity": 1.0 - }, - "uris": [ - "http://downloads.mesosphere.com/misc/toggle.tgz" - ], - "user": null, - "version": "2014-09-12T23:28:21.737Z" - } - } -- uri: /v2/apps/fake_app/versions - method: GET - content: | - { - "versions": [ - "2014-04-04T06:25:31.399Z" - ] - } -- uri: /v2/apps/fake_app - method: PUT - content: | - { - "deploymentId": "83b215a6-4e26-4e44-9333-5c385eda6438", - "version": "2014-08-26T07:37:50.462Z" - } -- uri: /v2/groups - method: GET - content: | - { - "apps": [], - "dependencies": [], - "groups": [ - { - "apps": [ - { - "args": null, - "backoffFactor": 1.15, - "backoffSeconds": 1, - "maxLaunchDelaySeconds": 3600, - "cmd": "sleep 30", - "constraints": [], - "container": null, - "cpus": 1.0, - "dependencies": [], - "disk": 0.0, - "env": {}, - "executor": "", - "healthChecks": [], - "id": "/test/app", - "instances": 1, - "mem": 128.0, - "ports": [ - 10000 - ], - "requirePorts": false, - "storeUrls": [], - "upgradeStrategy": { - "minimumHealthCapacity": 1.0 - }, - "uris": [], - "user": null, - "version": "2014-08-28T01:05:40.586Z" - } - ], - "dependencies": [], - "groups": [], - "id": "/test", - "version": "2014-08-28T01:09:46.212Z" - } - ], - "id": "/", - "version": "2014-08-28T01:09:46.212Z" - } -- uri: /v2/groups/test - method: GET - content: | - { - "apps": [ - { - "args": null, - "backoffFactor": 1.15, - "backoffSeconds": 1, - "maxLaunchDelaySeconds": 3600, - "cmd": "sleep 30", - "constraints": [], - "container": null, - "cpus": 1.0, - "dependencies": [], - "disk": 0.0, - "env": {}, - "executor": "", - "healthChecks": [], - "id": "/test/app", - "instances": 1, - "mem": 128.0, - "ports": [ - 10000 - ], - "requirePorts": false, - "storeUrls": [], - "upgradeStrategy": { - "minimumHealthCapacity": 1.0 - }, - "uris": [], - "user": null, - "version": "2014-08-28T01:05:40.586Z" - } - ], - "dependencies": [], - "groups": [], - "id": "/test", - "version": "2014-08-28T01:09:46.212Z" - } -- uri: /v2/groups/qa/product/1 - method: GET - content: | - { - "id": "/qa/product/1", - "groups": [ - { - "id": "frontend", - "apps": [ - { - "cmd": "", - "container": { - "type": "DOCKER", - "docker": { - "image": "quay.io/gambol99/apache-php:latest", - "network": "BRIDGE", - "portMappings": [ - { - "containerPort": 80, - "hostPort": 0, - "protocol": "tcp" - }, - { - "containerPort": 443, - "hostPort": 0, - "protocol": "tcp" - } - ] - } - }, - "healthChecks": [ - { - "protocol": "HTTP", - "path": "/hostname.php", - "gracePeriodSeconds": 3, - "intervalSeconds": 10, - "portIndex": 0, - "timeoutSeconds": 10, - 
"maxConsecutiveFailures": 3 - } - ], - "id": "apache", - "mem": 64, - "args": [], - "env": { - "ENVIRONMENT": "qa", - "SERVICE_80_NAME": "apache_http-qa-1", - "SERVICE_443_NAME": "apache_https-qa-1", - "NAME": "frontend", - "BACKEND_MYSQL_MASTER": "mysql-qa-1;3306", - "BACKEND_CACHE": "redis-qa-1;6379" - }, - "dependencies": [ - "/qa/product/1/database", - "/qa/product/1/caching" - ] - },{ - "id": "database", - "container": { - "type": "DOCKER", - "docker": { - "image": "tutum/mysql", - "network": "BRIDGE", - "portMappings": [ - { "containerPort": 3306, "hostPort": 0, "protocol": "tcp" } - ] - } - }, - "healthChecks": [ - { - "portIndex": 0, - "protocol": "TCP", - "gracePeriodSeconds": 10, - "intervalSeconds": 10, - "timeoutSeconds": 5, - "maxConsecutiveFailures": 2 - } - ], - "id": "mysql", - "mem": 1024, - "cmd": "", - "env": { - "ENVIRONMENT": "qa", - "SERVICE_NAME": "dbmaster", - "SERVICE_3306_NAME": "mysql-qa-1", - "MYSQL_PASS": "mysql", - "REPLICATION_MASTER": "true", - "REPLICATION_USER": "replication", - "REPLICATION_PASS": "8d67as9f7sjhsdfsd" - } - },{ - "container": { - "type": "DOCKER", - "docker": { - "image": "redis", - "network": "BRIDGE", - "portMappings": [ - { - "containerPort": 6379, - "hostPort": 0, - "protocol": "tcp" - } - ] - } - }, - "healthChecks": [ - { - "portIndex": 0, - "protocol": "TCP", - "gracePeriodSeconds": 10, - "intervalSeconds": 10, - "timeoutSeconds": 5, - "maxConsecutiveFailures": 2 - } - ], - "id": "caching", - "cmd": "", - "mem": 128, - "env": { - "ENVIRONMENT": "qa", - "SERVICE_6379_NAME": "redis-qa-1" - } - } - ] - } - ] - } -- uri: /v2/groups/:groupId - method: PUT - content: | - { - "deploymentId": "c0e7434c-df47-4d23-99f1-78bd78662231", - "version": "2014-08-28T16:45:41.063Z" - } -- uri: /v2/groups/:groupId - method: DELETE - content: | - {"version":"2014-07-01T10:20:50.196Z"} -- uri: /v2/tasks - method: GET - content: | - { - "tasks": [ - { - "appId": "/bridged-webapp", - "healthCheckResults": [ - { - "alive": true, - "consecutiveFailures": 0, - "firstSuccess": "2014-10-03T22:57:02.246Z", - "lastFailure": null, - "lastSuccess": "2014-10-03T22:57:41.643Z", - "taskId": "bridged-webapp.eb76c51f-4b4a-11e4-ae49-56847afe9799" - } - ], - "host": "10.141.141.10", - "id": "bridged-webapp.eb76c51f-4b4a-11e4-ae49-56847afe9799", - "ports": [ - 31000 - ], - "servicePorts": [ - 9000 - ], - "stagedAt": "2014-10-03T22:16:27.811Z", - "startedAt": "2014-10-03T22:57:41.587Z", - "version": "2014-10-03T22:16:23.634Z" - }, - { - "appId": "/bridged-webapp", - "healthCheckResults": [ - { - "alive": true, - "consecutiveFailures": 0, - "firstSuccess": "2014-10-03T22:57:02.246Z", - "lastFailure": null, - "lastSuccess": "2014-10-03T22:57:41.649Z", - "taskId": "bridged-webapp.ef0b5d91-4b4a-11e4-ae49-56847afe9799" - } - ], - "host": "10.141.141.10", - "id": "bridged-webapp.ef0b5d91-4b4a-11e4-ae49-56847afe9799", - "ports": [ - 31001 - ], - "servicePorts": [ - 9000 - ], - "stagedAt": "2014-10-03T22:16:33.814Z", - "startedAt": "2014-10-03T22:57:41.593Z", - "version": "2014-10-03T22:16:23.634Z" - } - ] - } -- uri: /v2/deployments - method: GET - content: | - [ - { - "affectedApps": [ - "/test" - ], - "id": "867ed450-f6a8-4d33-9b0e-e11c5513990b", - "steps": [ - [ - { - "action": "ScaleApplication", - "app": "/test" - } - ] - ], - "currentActions": [ - { - "action": "ScaleApplication", - "app": "/test" - } - ], - "version": "2014-08-26T08:18:03.595Z", - "currentStep": 1, - "totalSteps": 1 - } - ] -- uri: /v2/deployments/867ed450-f6a8-4d33-9b0e-e11c5513990b - method: DELETE - 
content: | - { - "deploymentId": "0b1467fc-d5cd-4bbc-bac2-2805351cee1e", - "version": "2014-08-26T08:20:26.171Z" - } -- uri: /v2/eventSubscriptions - method: POST - content: | - { - "callbackUrl": "http://localhost:9292/callback", - "clientIp": "0:0:0:0:0:0:0:1", - "eventType": "subscribe_event" - } -- uri: /v2/eventSubscriptions - method: GET - content: | - { - "callbackUrls": [ - "http://localhost:9292/callback" - ] - } -- uri: /v2/eventSubscriptions?callbackUrl=http://localhost:9292/callback - method: DELETE - content: | - { - "callbackUrl": "http://localhost:9292/callback", - "clientIp": "0:0:0:0:0:0:0:1", - "eventType": "unsubscribe_event" - } -- uri: /v2/queue - method: GET - content: | - { - "queue": [ - { - "app": { - "args": null, - "backoffFactor": 1.15, - "backoffSeconds": 1, - "cmd": "python toggle.py $PORT0", - "constraints": [], - "container": null, - "cpus": 0.2, - "dependencies": [], - "disk": 0.0, - "env": {}, - "executor": "", - "healthChecks": [], - "id": "/test", - "instances": 3, - "mem": 32.0, - "ports": [10000], - "requirePorts": false, - "storeUrls": [], - "upgradeStrategy": { - "minimumHealthCapacity": 1.0 - }, - "uris": [ - "http://downloads.mesosphere.com/misc/toggle.tgz" - ], - "user": null, - "version": "2014-08-26T05:04:49.766Z" - }, - "delay": { "overdue": true } - } - ] - } -- uri: /v2/leader - method: GET - content: | - { - "leader": "127.0.0.1:8080" - } -- uri: /v2/leader - method: DELETE - content: | - { - "message": "Leadership abdicted" - } -- uri: /v2/info - method: GET - content: | - { - "frameworkId": "20140730-222531-1863654316-5050-10422-0000", - "leader": "127.0.0.1:8080", - "http_config": { - "assets_path": null, - "http_port": 8080, - "https_port": 8443 - }, - "event_subscriber": { - "type": "http_callback", - "http_endpoints": [ - "localhost:9999/events" - ] - }, - "marathon_config": { - "checkpoint": false, - "executor": "//cmd", - "failover_timeout": 604800, - "ha": true, - "hostname": "127.0.0.1", - "local_port_max": 49151, - "local_port_min": 32767, - "master": "zk://localhost:2181/mesos", - "mesos_role": null, - "mesos_user": "root", - "reconciliation_initial_delay": 30000, - "reconciliation_interval": 30000, - "task_launch_timeout": 60000 - }, - "name": "marathon", - "version": "0.7.0-SNAPSHOT", - "zookeeper_config": { - "zk": "zk://localhost:2181/marathon", - "zk_future_timeout": { - "duration": 10 - }, - "zk_hosts": "localhost:2181", - "zk_path": "/marathon", - "zk_state": "/marathon", - "zk_timeout": 10 - } - } diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/rest-api.rb b/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/rest-api.rb deleted file mode 100644 index ed853cf97..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/tests/rest-api/rest-api.rb +++ /dev/null @@ -1,33 +0,0 @@ -#!/usr/bin/env ruby -# -# Copyright 2014 Rohith All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -require 'yaml' - -module RestAPI - class Application < Sinatra::Base - enable :logging, :raise_errors - - YAML.load(File.read("methods.yml")).each do |rest| - method = rest['method'].downcase - if method =~ /^(post|get|delete|put)$/ - send(method.to_sym, rest['uri']) do - content_type :json - rest['content'] - end - end - end - end -end diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/update_strategy.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/update_strategy.go deleted file mode 100644 index bc14ab5e9..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/update_strategy.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -type UpgradeStrategy struct { - MinimumHealthCapacity float64 `json:"minimumHealthCapacity,omitempty"` - MaximumOverCapacity float64 `json:"maximumOverCapacity,omitempty"` -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/utils.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/utils.go deleted file mode 100644 index c63996ef0..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/utils.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package marathon - -import ( - "errors" - "fmt" - "net" - "strings" - "sync/atomic" - "time" -) - -type AtomicSwitch int64 - -func (p *AtomicSwitch) IsSwitched() bool { - return atomic.LoadInt64((*int64)(p)) != 0 -} - -func (p *AtomicSwitch) SwitchOn() { - atomic.StoreInt64((*int64)(p), 1) -} - -func (p *AtomicSwitch) SwitchedOff() { - atomic.StoreInt64((*int64)(p), 0) -} - -func validateID(id string) string { - if !strings.HasPrefix(id, "/") { - return fmt.Sprintf("/%s", id) - } - return id -} - -func trimRootPath(id string) string { - if strings.HasPrefix(id, "/") { - return strings.TrimPrefix(id, "/") - } - return id -} - -func deadline(timeout time.Duration, work func(chan bool) error) error { - result := make(chan error) - timer := time.After(timeout) - stop_channel := make(chan bool) - - // allow the method to attempt - go func() { - result <- work(stop_channel) - }() - for { - select { - case err := <-result: - return err - case <-timer: - stop_channel <- true - return ErrTimeoutError - } - } -} - -func getInterfaceAddress(name string) (string, error) { - if interfaces, err := net.Interfaces(); err != nil { - return "", err - } else { - for _, iface := range interfaces { - /* step: get only the interface we're interested in */ - if iface.Name == name { - addrs, err := iface.Addrs() - if err != nil { - return "", err - } - /* step: return the first address */ - if len(addrs) > 0 { - return strings.SplitN(addrs[0].String(), "/", 2)[0], nil - } - } - } - } - return "", errors.New("Unable to determine or find the interface") -} - -func contains(elements []string, value string) bool { - for _, element := range elements { - if element == value { - return true - } - } - return false -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/utils_test.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/utils_test.go deleted file mode 100644 index ea956e0a5..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/utils_test.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestValidateID(t *testing.T) { - path := "test/path" - assert.Equal(t, validateID(path), "/test/path") - path = "/test/path" - assert.Equal(t, validateID(path), "/test/path") -} - -func TestTrimRootPath(t *testing.T) { - path := "/test/path" - assert.Equal(t, trimRootPath(path), "test/path") - path = "test/path" - assert.Equal(t, trimRootPath(path), "test/path") -} diff --git a/Godeps/_workspace/src/github.com/gambol99/go-marathon/version.go b/Godeps/_workspace/src/github.com/gambol99/go-marathon/version.go deleted file mode 100644 index 437efebfb..000000000 --- a/Godeps/_workspace/src/github.com/gambol99/go-marathon/version.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 Rohith All rights reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package marathon - -const VERSION = "0.0.2" diff --git a/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml deleted file mode 100644 index 6796581fb..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/context/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/context/LICENSE b/Godeps/_workspace/src/github.com/gorilla/context/LICENSE deleted file mode 100644 index 0e5fb8728..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/context/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/context/README.md b/Godeps/_workspace/src/github.com/gorilla/context/README.md deleted file mode 100644 index c60a31b05..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/context/README.md +++ /dev/null @@ -1,7 +0,0 @@ -context -======= -[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context) - -gorilla/context is a general purpose registry for global request variables. 
- -Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context.go b/Godeps/_workspace/src/github.com/gorilla/context/context.go deleted file mode 100644 index 81cb128b1..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/context/context.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "sync" - "time" -) - -var ( - mutex sync.RWMutex - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) -) - -// Set stores a value for a given key in a given request. -func Set(r *http.Request, key, val interface{}) { - mutex.Lock() - if data[r] == nil { - data[r] = make(map[interface{}]interface{}) - datat[r] = time.Now().Unix() - } - data[r][key] = val - mutex.Unlock() -} - -// Get returns a value stored for a given key in a given request. -func Get(r *http.Request, key interface{}) interface{} { - mutex.RLock() - if ctx := data[r]; ctx != nil { - value := ctx[key] - mutex.RUnlock() - return value - } - mutex.RUnlock() - return nil -} - -// GetOk returns stored value and presence state like multi-value return of map access. -func GetOk(r *http.Request, key interface{}) (interface{}, bool) { - mutex.RLock() - if _, ok := data[r]; ok { - value, ok := data[r][key] - mutex.RUnlock() - return value, ok - } - mutex.RUnlock() - return nil, false -} - -// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests. -func GetAll(r *http.Request) map[interface{}]interface{} { - mutex.RLock() - if context, ok := data[r]; ok { - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result - } - mutex.RUnlock() - return nil -} - -// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if -// the request was registered. -func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) { - mutex.RLock() - context, ok := data[r] - result := make(map[interface{}]interface{}, len(context)) - for k, v := range context { - result[k] = v - } - mutex.RUnlock() - return result, ok -} - -// Delete removes a value stored for a given key in a given request. -func Delete(r *http.Request, key interface{}) { - mutex.Lock() - if data[r] != nil { - delete(data[r], key) - } - mutex.Unlock() -} - -// Clear removes all values stored for a given request. -// -// This is usually called by a handler wrapper to clean up request -// variables at the end of a request lifetime. See ClearHandler(). -func Clear(r *http.Request) { - mutex.Lock() - clear(r) - mutex.Unlock() -} - -// clear is Clear without the lock. -func clear(r *http.Request) { - delete(data, r) - delete(datat, r) -} - -// Purge removes request data stored for longer than maxAge, in seconds. -// It returns the amount of requests removed. -// -// If maxAge <= 0, all request data is removed. -// -// This is only used for sanity check: in case context cleaning was not -// properly set some request data can be kept forever, consuming an increasing -// amount of memory. In case this is detected, Purge() must be called -// periodically until the problem is fixed. 
-func Purge(maxAge int) int { - mutex.Lock() - count := 0 - if maxAge <= 0 { - count = len(data) - data = make(map[*http.Request]map[interface{}]interface{}) - datat = make(map[*http.Request]int64) - } else { - min := time.Now().Unix() - int64(maxAge) - for r := range data { - if datat[r] < min { - clear(r) - count++ - } - } - } - mutex.Unlock() - return count -} - -// ClearHandler wraps an http.Handler and clears request values at the end -// of a request lifetime. -func ClearHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - defer Clear(r) - h.ServeHTTP(w, r) - }) -} diff --git a/Godeps/_workspace/src/github.com/gorilla/context/context_test.go b/Godeps/_workspace/src/github.com/gorilla/context/context_test.go deleted file mode 100644 index 9814c501e..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/context/context_test.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package context - -import ( - "net/http" - "testing" -) - -type keyType int - -const ( - key1 keyType = iota - key2 -) - -func TestContext(t *testing.T) { - assertEqual := func(val interface{}, exp interface{}) { - if val != exp { - t.Errorf("Expected %v, got %v.", exp, val) - } - } - - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - emptyR, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - - // Get() - assertEqual(Get(r, key1), nil) - - // Set() - Set(r, key1, "1") - assertEqual(Get(r, key1), "1") - assertEqual(len(data[r]), 1) - - Set(r, key2, "2") - assertEqual(Get(r, key2), "2") - assertEqual(len(data[r]), 2) - - //GetOk - value, ok := GetOk(r, key1) - assertEqual(value, "1") - assertEqual(ok, true) - - value, ok = GetOk(r, "not exists") - assertEqual(value, nil) - assertEqual(ok, false) - - Set(r, "nil value", nil) - value, ok = GetOk(r, "nil value") - assertEqual(value, nil) - assertEqual(ok, true) - - // GetAll() - values := GetAll(r) - assertEqual(len(values), 3) - - // GetAll() for empty request - values = GetAll(emptyR) - if values != nil { - t.Error("GetAll didn't return nil value for invalid request") - } - - // GetAllOk() - values, ok = GetAllOk(r) - assertEqual(len(values), 3) - assertEqual(ok, true) - - // GetAllOk() for empty request - values, ok = GetAllOk(emptyR) - assertEqual(value, nil) - assertEqual(ok, false) - - // Delete() - Delete(r, key1) - assertEqual(Get(r, key1), nil) - assertEqual(len(data[r]), 2) - - Delete(r, key2) - assertEqual(Get(r, key2), nil) - assertEqual(len(data[r]), 1) - - // Clear() - Clear(r) - assertEqual(len(data), 0) -} - -func parallelReader(r *http.Request, key string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Get(r, key) - } - done <- struct{}{} - -} - -func parallelWriter(r *http.Request, key, value string, iterations int, wait, done chan struct{}) { - <-wait - for i := 0; i < iterations; i++ { - Set(r, key, value) - } - done <- struct{}{} - -} - -func benchmarkMutex(b *testing.B, numReaders, numWriters, iterations int) { - - b.StopTimer() - r, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - done := make(chan struct{}) - b.StartTimer() - - for i := 0; i < b.N; i++ { - wait := make(chan struct{}) - - for i := 0; i < numReaders; i++ { - go parallelReader(r, "test", iterations, wait, done) - } - - for i := 0; i < numWriters; i++ { - go parallelWriter(r, "test", "123", 
iterations, wait, done) - } - - close(wait) - - for i := 0; i < numReaders+numWriters; i++ { - <-done - } - - } - -} - -func BenchmarkMutexSameReadWrite1(b *testing.B) { - benchmarkMutex(b, 1, 1, 32) -} -func BenchmarkMutexSameReadWrite2(b *testing.B) { - benchmarkMutex(b, 2, 2, 32) -} -func BenchmarkMutexSameReadWrite4(b *testing.B) { - benchmarkMutex(b, 4, 4, 32) -} -func BenchmarkMutex1(b *testing.B) { - benchmarkMutex(b, 2, 8, 32) -} -func BenchmarkMutex2(b *testing.B) { - benchmarkMutex(b, 16, 4, 64) -} -func BenchmarkMutex3(b *testing.B) { - benchmarkMutex(b, 1, 2, 128) -} -func BenchmarkMutex4(b *testing.B) { - benchmarkMutex(b, 128, 32, 256) -} -func BenchmarkMutex5(b *testing.B) { - benchmarkMutex(b, 1024, 2048, 64) -} -func BenchmarkMutex6(b *testing.B) { - benchmarkMutex(b, 2048, 1024, 512) -} diff --git a/Godeps/_workspace/src/github.com/gorilla/context/doc.go b/Godeps/_workspace/src/github.com/gorilla/context/doc.go deleted file mode 100644 index 73c740031..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/context/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package context stores values shared during a request lifetime. - -For example, a router can set variables extracted from the URL and later -application handlers can access those values, or it can be used to store -sessions values to be saved at the end of a request. There are several -others common uses. - -The idea was posted by Brad Fitzpatrick to the go-nuts mailing list: - - http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53 - -Here's the basic usage: first define the keys that you will need. The key -type is interface{} so a key can be of any type that supports equality. -Here we define a key using a custom int type to avoid name collisions: - - package foo - - import ( - "github.com/gorilla/context" - ) - - type key int - - const MyKey key = 0 - -Then set a variable. Variables are bound to an http.Request object, so you -need a request instance to set a value: - - context.Set(r, MyKey, "bar") - -The application can later access the variable using the same key you provided: - - func MyHandler(w http.ResponseWriter, r *http.Request) { - // val is "bar". - val := context.Get(r, foo.MyKey) - - // returns ("bar", true) - val, ok := context.GetOk(r, foo.MyKey) - // ... - } - -And that's all about the basic usage. We discuss some other ideas below. - -Any type can be stored in the context. To enforce a given type, make the key -private and wrap Get() and Set() to accept and return values of a specific -type: - - type key int - - const mykey key = 0 - - // GetMyKey returns a value for this package from the request values. - func GetMyKey(r *http.Request) SomeType { - if rv := context.Get(r, mykey); rv != nil { - return rv.(SomeType) - } - return nil - } - - // SetMyKey sets a value for this package in the request values. - func SetMyKey(r *http.Request, val SomeType) { - context.Set(r, mykey, val) - } - -Variables must be cleared at the end of a request, to remove all values -that were stored. This can be done in an http.Handler, after a request was -served. Just call Clear() passing the request: - - context.Clear(r) - -...or use ClearHandler(), which conveniently wraps an http.Handler to clear -variables at the end of a request lifetime. 
- -The Routers from the packages gorilla/mux and gorilla/pat call Clear() -so if you are using either of them you don't need to clear the context manually. -*/ -package context diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml b/Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml deleted file mode 100644 index f983b60c6..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -sudo: false - -go: - - 1.3 - - 1.4 - - 1.5 - - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/LICENSE b/Godeps/_workspace/src/github.com/gorilla/handlers/LICENSE deleted file mode 100644 index 66ea3c8ae..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/README.md b/Godeps/_workspace/src/github.com/gorilla/handlers/README.md deleted file mode 100644 index a340abe08..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/README.md +++ /dev/null @@ -1,52 +0,0 @@ -gorilla/handlers -================ -[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) [![Build Status](https://travis-ci.org/gorilla/handlers.svg?branch=master)](https://travis-ci.org/gorilla/handlers) - -Package handlers is a collection of handlers (aka "HTTP middleware") for use -with Go's `net/http` package (or any framework supporting `http.Handler`), including: - -* `LoggingHandler` for logging HTTP requests in the Apache [Common Log - Format](http://httpd.apache.org/docs/2.2/logs.html#common). -* `CombinedLoggingHandler` for logging HTTP requests in the Apache [Combined Log - Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by - both Apache and nginx. -* `CompressHandler` for gzipping responses. -* `ContentTypeHandler` for validating requests against a list of accepted - content types. 
-* `MethodHandler` for matching HTTP methods against handlers in a - `map[string]http.Handler` -* `ProxyHeaders` for populating `r.RemoteAddr` and `r.URL.Scheme` based on the - `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded` - headers when running a Go server behind a HTTP reverse proxy. -* `CanonicalHost` for re-directing to the preferred host when handling multiple - domains (i.e. multiple CNAME aliases). - -Other handlers are documented [on the Gorilla -website](http://www.gorillatoolkit.org/pkg/handlers). - -## Example - -A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`: - -```go -import ( - "net/http" - "github.com/gorilla/handlers" -) - -func main() { - r := http.NewServeMux() - - // Only log requests to our admin dashboard to stdout - r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard))) - r.HandleFunc("/", ShowIndex) - - // Wrap our server with our gzip handler to gzip compress all responses. - http.ListenAndServe(":8000", handlers.CompressHandler(r)) -} -``` - -## License - -BSD licensed. See the included LICENSE file for details. - diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/canonical.go b/Godeps/_workspace/src/github.com/gorilla/handlers/canonical.go deleted file mode 100644 index 3be4e8262..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/canonical.go +++ /dev/null @@ -1,71 +0,0 @@ -package handlers - -import ( - "net/http" - "net/url" - "strings" -) - -type canonical struct { - h http.Handler - domain string - code int -} - -// CanonicalHost is HTTP middleware that re-directs requests to the canonical -// domain. It accepts a domain and a status code (e.g. 301 or 302) and -// re-directs clients to this domain. The existing request path is maintained. -// -// Note: If the provided domain is considered invalid by url.Parse or otherwise -// returns an empty scheme or host, clients are not re-directed. -// -// Example: -// -// r := mux.NewRouter() -// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) -// r.HandleFunc("/route", YourHandler) -// -// log.Fatal(http.ListenAndServe(":7000", canonical(r))) -// -func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { - fn := func(h http.Handler) http.Handler { - return canonical{h, domain, code} - } - - return fn -} - -func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) { - dest, err := url.Parse(c.domain) - if err != nil { - // Call the next handler if the provided domain fails to parse. - c.h.ServeHTTP(w, r) - return - } - - if dest.Scheme == "" || dest.Host == "" { - // Call the next handler if the scheme or host are empty. - // Note that url.Parse won't fail on in this case. - c.h.ServeHTTP(w, r) - return - } - - if !strings.EqualFold(cleanHost(r.Host), dest.Host) { - // Re-build the destination URL - dest := dest.Scheme + "://" + dest.Host + r.URL.Path - http.Redirect(w, r, dest, c.code) - return - } - - c.h.ServeHTTP(w, r) -} - -// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '. -// This is backported from Go 1.5 (in response to issue #11206) and attempts to -// mitigate malformed Host headers that do not match the format in RFC7230. 
-func cleanHost(in string) string { - if i := strings.IndexAny(in, " /"); i != -1 { - return in[:i] - } - return in -} diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/canonical_test.go b/Godeps/_workspace/src/github.com/gorilla/handlers/canonical_test.go deleted file mode 100644 index af8c6303d..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/canonical_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package handlers - -import ( - "bufio" - "bytes" - "log" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestCleanHost(t *testing.T) { - tests := []struct { - in, want string - }{ - {"www.google.com", "www.google.com"}, - {"www.google.com foo", "www.google.com"}, - {"www.google.com/foo", "www.google.com"}, - {" first character is a space", ""}, - } - for _, tt := range tests { - got := cleanHost(tt.in) - if tt.want != got { - t.Errorf("cleanHost(%q) = %q, want %q", tt.in, got, tt.want) - } - } -} - -func TestCanonicalHost(t *testing.T) { - gorilla := "http://www.gorillatoolkit.org" - - rr := httptest.NewRecorder() - r := newRequest("GET", "http://www.example.com/") - - testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) - - // Test a re-direct: should return a 302 Found. - CanonicalHost(gorilla, http.StatusFound)(testHandler).ServeHTTP(rr, r) - - if rr.Code != http.StatusFound { - t.Fatalf("bad status: got %v want %v", rr.Code, http.StatusFound) - } - - if rr.Header().Get("Location") != gorilla+r.URL.Path { - t.Fatalf("bad re-direct: got %q want %q", rr.Header().Get("Location"), gorilla+r.URL.Path) - } - -} - -func TestBadDomain(t *testing.T) { - rr := httptest.NewRecorder() - r := newRequest("GET", "http://www.example.com/") - - testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) - - // Test a bad domain - should return 200 OK. - CanonicalHost("%", http.StatusFound)(testHandler).ServeHTTP(rr, r) - - if rr.Code != http.StatusOK { - t.Fatalf("bad status: got %v want %v", rr.Code, http.StatusOK) - } -} - -func TestEmptyHost(t *testing.T) { - rr := httptest.NewRecorder() - r := newRequest("GET", "http://www.example.com/") - - testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}) - - // Test a domain that returns an empty url.Host from url.Parse. - CanonicalHost("hello.com", http.StatusFound)(testHandler).ServeHTTP(rr, r) - - if rr.Code != http.StatusOK { - t.Fatalf("bad status: got %v want %v", rr.Code, http.StatusOK) - } -} - -func TestHeaderWrites(t *testing.T) { - gorilla := "http://www.gorillatoolkit.org" - - testHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(200) - }) - - // Catch the log output to ensure we don't write multiple headers. - var b bytes.Buffer - buf := bufio.NewWriter(&b) - tl := log.New(buf, "test: ", log.Lshortfile) - - srv := httptest.NewServer( - CanonicalHost(gorilla, http.StatusFound)(testHandler)) - defer srv.Close() - srv.Config.ErrorLog = tl - - _, err := http.Get(srv.URL) - if err != nil { - t.Fatal(err) - } - - err = buf.Flush() - if err != nil { - t.Fatal(err) - } - - // We rely on the error not changing: net/http does not export it. 
- if strings.Contains(b.String(), "multiple response.WriteHeader calls") { - t.Fatalf("re-direct did not return early: multiple header writes") - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/compress.go b/Godeps/_workspace/src/github.com/gorilla/handlers/compress.go deleted file mode 100644 index 67c4077b5..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/compress.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package handlers - -import ( - "compress/flate" - "compress/gzip" - "io" - "net/http" - "strings" -) - -type compressResponseWriter struct { - io.Writer - http.ResponseWriter - http.Hijacker -} - -func (w *compressResponseWriter) WriteHeader(c int) { - w.ResponseWriter.Header().Del("Content-Length") - w.ResponseWriter.WriteHeader(c) -} - -func (w *compressResponseWriter) Header() http.Header { - return w.ResponseWriter.Header() -} - -func (w *compressResponseWriter) Write(b []byte) (int, error) { - h := w.ResponseWriter.Header() - if h.Get("Content-Type") == "" { - h.Set("Content-Type", http.DetectContentType(b)) - } - h.Del("Content-Length") - - return w.Writer.Write(b) -} - -// CompressHandler gzip compresses HTTP responses for clients that support it -// via the 'Accept-Encoding' header. -func CompressHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - L: - for _, enc := range strings.Split(r.Header.Get("Accept-Encoding"), ",") { - switch strings.TrimSpace(enc) { - case "gzip": - w.Header().Set("Content-Encoding", "gzip") - w.Header().Add("Vary", "Accept-Encoding") - - gw := gzip.NewWriter(w) - defer gw.Close() - - h, hok := w.(http.Hijacker) - if !hok { /* w is not Hijacker... oh well... */ - h = nil - } - - w = &compressResponseWriter{ - Writer: gw, - ResponseWriter: w, - Hijacker: h, - } - - break L - case "deflate": - w.Header().Set("Content-Encoding", "deflate") - w.Header().Add("Vary", "Accept-Encoding") - - fw, _ := flate.NewWriter(w, flate.DefaultCompression) - defer fw.Close() - - h, hok := w.(http.Hijacker) - if !hok { /* w is not Hijacker... oh well... */ - h = nil - } - - w = &compressResponseWriter{ - Writer: fw, - ResponseWriter: w, - Hijacker: h, - } - - break L - } - } - - h.ServeHTTP(w, r) - }) -} diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go b/Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go deleted file mode 100644 index ce110682b..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/compress_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
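// Illustrative sketch, not part of the removed vendored files: exercising the
// CompressHandler shown above with net/http/httptest. A request that sends
// "Accept-Encoding: gzip" gets a gzip-encoded body; the middleware sets
// Content-Encoding and Vary and drops Content-Length. The payload string is a
// hypothetical placeholder.
package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/handlers"
)

func main() {
	h := handlers.CompressHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "Gorilla!\n")
	}))

	rec := httptest.NewRecorder()
	req := httptest.NewRequest("GET", "http://example.com/", nil)
	req.Header.Set("Accept-Encoding", "gzip")
	h.ServeHTTP(rec, req)

	fmt.Println(rec.Header().Get("Content-Encoding")) // "gzip"

	// The gzip writer is closed when ServeHTTP returns, so the recorded body
	// is a complete gzip stream; decompress it to recover the payload.
	zr, err := gzip.NewReader(rec.Body)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(zr)
	fmt.Printf("%s", body) // "Gorilla!\n"
}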
- -package handlers - -import ( - "io" - "net/http" - "net/http/httptest" - "strconv" - "testing" -) - -func compressedRequest(w *httptest.ResponseRecorder, compression string) { - CompressHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Length", strconv.Itoa(9*1024)) - for i := 0; i < 1024; i++ { - io.WriteString(w, "Gorilla!\n") - } - })).ServeHTTP(w, &http.Request{ - Method: "GET", - Header: http.Header{ - "Accept-Encoding": []string{compression}, - }, - }) - -} - -func TestCompressHandlerNoCompression(t *testing.T) { - w := httptest.NewRecorder() - compressedRequest(w, "") - if enc := w.HeaderMap.Get("Content-Encoding"); enc != "" { - t.Errorf("wrong content encoding, got %q want %q", enc, "") - } - if ct := w.HeaderMap.Get("Content-Type"); ct != "" { - t.Errorf("wrong content type, got %q want %q", ct, "") - } - if w.Body.Len() != 1024*9 { - t.Errorf("wrong len, got %d want %d", w.Body.Len(), 1024*9) - } - if l := w.HeaderMap.Get("Content-Length"); l != "9216" { - t.Errorf("wrong content-length. got %q expected %d", l, 1024*9) - } -} - -func TestCompressHandlerGzip(t *testing.T) { - w := httptest.NewRecorder() - compressedRequest(w, "gzip") - if w.HeaderMap.Get("Content-Encoding") != "gzip" { - t.Errorf("wrong content encoding, got %q want %q", w.HeaderMap.Get("Content-Encoding"), "gzip") - } - if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" { - t.Errorf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8") - } - if w.Body.Len() != 72 { - t.Errorf("wrong len, got %d want %d", w.Body.Len(), 72) - } - if l := w.HeaderMap.Get("Content-Length"); l != "" { - t.Errorf("wrong content-length. got %q expected %q", l, "") - } -} - -func TestCompressHandlerDeflate(t *testing.T) { - w := httptest.NewRecorder() - compressedRequest(w, "deflate") - if w.HeaderMap.Get("Content-Encoding") != "deflate" { - t.Fatalf("wrong content encoding, got %q want %q", w.HeaderMap.Get("Content-Encoding"), "deflate") - } - if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" { - t.Fatalf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8") - } - if w.Body.Len() != 54 { - t.Fatalf("wrong len, got %d want %d", w.Body.Len(), 54) - } -} - -func TestCompressHandlerGzipDeflate(t *testing.T) { - w := httptest.NewRecorder() - compressedRequest(w, "gzip, deflate ") - if w.HeaderMap.Get("Content-Encoding") != "gzip" { - t.Fatalf("wrong content encoding, got %q want %q", w.HeaderMap.Get("Content-Encoding"), "gzip") - } - if w.HeaderMap.Get("Content-Type") != "text/plain; charset=utf-8" { - t.Fatalf("wrong content type, got %s want %s", w.HeaderMap.Get("Content-Type"), "text/plain; charset=utf-8") - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/doc.go b/Godeps/_workspace/src/github.com/gorilla/handlers/doc.go deleted file mode 100644 index 944e5a8ae..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -/* -Package handlers is a collection of handlers (aka "HTTP middleware") for use -with Go's net/http package (or any framework supporting http.Handler). - -The package includes handlers for logging in standardised formats, compressing -HTTP responses, validating content types and other useful tools for manipulating -requests and responses. 
-*/ -package handlers diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go b/Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go deleted file mode 100644 index 552bfc2ff..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/handlers.go +++ /dev/null @@ -1,388 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package handlers - -import ( - "bufio" - "fmt" - "io" - "net" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -// MethodHandler is an http.Handler that dispatches to a handler whose key in the MethodHandler's -// map matches the name of the HTTP request's method, eg: GET -// -// If the request's method is OPTIONS and OPTIONS is not a key in the map then the handler -// responds with a status of 200 and sets the Allow header to a comma-separated list of -// available methods. -// -// If the request's method doesn't match any of its keys the handler responds with -// a status of 405, Method not allowed and sets the Allow header to a comma-separated list -// of available methods. -type MethodHandler map[string]http.Handler - -func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if handler, ok := h[req.Method]; ok { - handler.ServeHTTP(w, req) - } else { - allow := []string{} - for k := range h { - allow = append(allow, k) - } - sort.Strings(allow) - w.Header().Set("Allow", strings.Join(allow, ", ")) - if req.Method == "OPTIONS" { - w.WriteHeader(http.StatusOK) - } else { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } - } -} - -// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its friends -type loggingHandler struct { - writer io.Writer - handler http.Handler -} - -// combinedLoggingHandler is the http.Handler implementation for LoggingHandlerTo and its friends -type combinedLoggingHandler struct { - writer io.Writer - handler http.Handler -} - -func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - t := time.Now() - logger := makeLogger(w) - url := *req.URL - h.handler.ServeHTTP(logger, req) - writeLog(h.writer, req, url, t, logger.Status(), logger.Size()) -} - -func (h combinedLoggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { - t := time.Now() - logger := makeLogger(w) - url := *req.URL - h.handler.ServeHTTP(logger, req) - writeCombinedLog(h.writer, req, url, t, logger.Status(), logger.Size()) -} - -func makeLogger(w http.ResponseWriter) loggingResponseWriter { - var logger loggingResponseWriter = &responseLogger{w: w} - if _, ok := w.(http.Hijacker); ok { - logger = &hijackLogger{responseLogger{w: w}} - } - h, ok1 := logger.(http.Hijacker) - c, ok2 := w.(http.CloseNotifier) - if ok1 && ok2 { - return hijackCloseNotifier{logger, h, c} - } - if ok2 { - return &closeNotifyWriter{logger, c} - } - return logger -} - -type loggingResponseWriter interface { - http.ResponseWriter - http.Flusher - Status() int - Size() int -} - -// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP status -// code and body size -type responseLogger struct { - w http.ResponseWriter - status int - size int -} - -func (l *responseLogger) Header() http.Header { - return l.w.Header() -} - -func (l *responseLogger) Write(b []byte) (int, error) { - if l.status == 0 { - // The status will be StatusOK if WriteHeader has not been called yet - l.status = 
http.StatusOK - } - size, err := l.w.Write(b) - l.size += size - return size, err -} - -func (l *responseLogger) WriteHeader(s int) { - l.w.WriteHeader(s) - l.status = s -} - -func (l *responseLogger) Status() int { - return l.status -} - -func (l *responseLogger) Size() int { - return l.size -} - -func (l *responseLogger) Flush() { - f, ok := l.w.(http.Flusher) - if ok { - f.Flush() - } -} - -type hijackLogger struct { - responseLogger -} - -func (l *hijackLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { - h := l.responseLogger.w.(http.Hijacker) - conn, rw, err := h.Hijack() - if err == nil && l.responseLogger.status == 0 { - // The status will be StatusSwitchingProtocols if there was no error and WriteHeader has not been called yet - l.responseLogger.status = http.StatusSwitchingProtocols - } - return conn, rw, err -} - -type closeNotifyWriter struct { - loggingResponseWriter - http.CloseNotifier -} - -type hijackCloseNotifier struct { - loggingResponseWriter - http.Hijacker - http.CloseNotifier -} - -const lowerhex = "0123456789abcdef" - -func appendQuoted(buf []byte, s string) []byte { - var runeTmp [utf8.UTFMax]byte - for width := 0; len(s) > 0; s = s[width:] { - r := rune(s[0]) - width = 1 - if r >= utf8.RuneSelf { - r, width = utf8.DecodeRuneInString(s) - } - if width == 1 && r == utf8.RuneError { - buf = append(buf, `\x`...) - buf = append(buf, lowerhex[s[0]>>4]) - buf = append(buf, lowerhex[s[0]&0xF]) - continue - } - if r == rune('"') || r == '\\' { // always backslashed - buf = append(buf, '\\') - buf = append(buf, byte(r)) - continue - } - if strconv.IsPrint(r) { - n := utf8.EncodeRune(runeTmp[:], r) - buf = append(buf, runeTmp[:n]...) - continue - } - switch r { - case '\a': - buf = append(buf, `\a`...) - case '\b': - buf = append(buf, `\b`...) - case '\f': - buf = append(buf, `\f`...) - case '\n': - buf = append(buf, `\n`...) - case '\r': - buf = append(buf, `\r`...) - case '\t': - buf = append(buf, `\t`...) - case '\v': - buf = append(buf, `\v`...) - default: - switch { - case r < ' ': - buf = append(buf, `\x`...) - buf = append(buf, lowerhex[s[0]>>4]) - buf = append(buf, lowerhex[s[0]&0xF]) - case r > utf8.MaxRune: - r = 0xFFFD - fallthrough - case r < 0x10000: - buf = append(buf, `\u`...) - for s := 12; s >= 0; s -= 4 { - buf = append(buf, lowerhex[r>>uint(s)&0xF]) - } - default: - buf = append(buf, `\U`...) - for s := 28; s >= 0; s -= 4 { - buf = append(buf, lowerhex[r>>uint(s)&0xF]) - } - } - } - } - return buf - -} - -// buildCommonLogLine builds a log entry for req in Apache Common Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte { - username := "-" - if url.User != nil { - if name := url.User.Username(); name != "" { - username = name - } - } - - host, _, err := net.SplitHostPort(req.RemoteAddr) - - if err != nil { - host = req.RemoteAddr - } - - uri := url.RequestURI() - - buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2) - buf = append(buf, host...) - buf = append(buf, " - "...) - buf = append(buf, username...) - buf = append(buf, " ["...) - buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...) - buf = append(buf, `] "`...) - buf = append(buf, req.Method...) - buf = append(buf, " "...) - buf = appendQuoted(buf, uri) - buf = append(buf, " "...) - buf = append(buf, req.Proto...) - buf = append(buf, `" `...) 
- buf = append(buf, strconv.Itoa(status)...) - buf = append(buf, " "...) - buf = append(buf, strconv.Itoa(size)...) - return buf -} - -// writeLog writes a log entry for req to w in Apache Common Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func writeLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) { - buf := buildCommonLogLine(req, url, ts, status, size) - buf = append(buf, '\n') - w.Write(buf) -} - -// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. -// ts is the timestamp with which the entry should be logged. -// status and size are used to provide the response HTTP status and size. -func writeCombinedLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) { - buf := buildCommonLogLine(req, url, ts, status, size) - buf = append(buf, ` "`...) - buf = appendQuoted(buf, req.Referer()) - buf = append(buf, `" "`...) - buf = appendQuoted(buf, req.UserAgent()) - buf = append(buf, '"', '\n') - w.Write(buf) -} - -// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in -// Apache Combined Log Format. -// -// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. -// -// LoggingHandler always sets the ident field of the log to - -func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { - return combinedLoggingHandler{out, h} -} - -// LoggingHandler return a http.Handler that wraps h and logs requests to out in -// Apache Common Log Format (CLF). -// -// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format. -// -// LoggingHandler always sets the ident field of the log to - -// -// Example: -// -// r := mux.NewRouter() -// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { -// w.Write([]byte("This is a catch-all route")) -// }) -// loggedRouter := handlers.LoggingHandler(os.Stdout, r) -// http.ListenAndServe(":1123", loggedRouter) -// -func LoggingHandler(out io.Writer, h http.Handler) http.Handler { - return loggingHandler{out, h} -} - -// isContentType validates the Content-Type header -// is contentType. That is, its type and subtype match. -func isContentType(h http.Header, contentType string) bool { - ct := h.Get("Content-Type") - if i := strings.IndexRune(ct, ';'); i != -1 { - ct = ct[0:i] - } - return ct == contentType -} - -// ContentTypeHandler wraps and returns a http.Handler, validating the request content type -// is acompatible with the contentTypes list. -// It writes a HTTP 415 error if that fails. -// -// Only PUT, POST, and PATCH requests are considered. -func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") { - h.ServeHTTP(w, r) - return - } - - for _, ct := range contentTypes { - if isContentType(r.Header, ct) { - h.ServeHTTP(w, r) - return - } - } - http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType) - }) -} - -const ( - // HTTPMethodOverrideHeader is a commonly used - // http header to override a request method. - HTTPMethodOverrideHeader = "X-HTTP-Method-Override" - // HTTPMethodOverrideFormKey is a commonly used - // HTML form key to override a request method. 
- HTTPMethodOverrideFormKey = "_method" -) - -// HTTPMethodOverrideHandler wraps and returns a http.Handler which checks for the X-HTTP-Method-Override header -// or the _method form key, and overrides (if valid) request.Method with its value. -// -// This is especially useful for http clients that don't support many http verbs. -// It isn't secure to override e.g a GET to a POST, so only POST requests are considered. -// Likewise, the override method can only be a "write" method: PUT, PATCH or DELETE. -// -// Form method takes precedence over header method. -func HTTPMethodOverrideHandler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == "POST" { - om := r.FormValue(HTTPMethodOverrideFormKey) - if om == "" { - om = r.Header.Get(HTTPMethodOverrideHeader) - } - if om == "PUT" || om == "PATCH" || om == "DELETE" { - r.Method = om - } - } - h.ServeHTTP(w, r) - }) -} diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go b/Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go deleted file mode 100644 index 94eeb0350..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/handlers_test.go +++ /dev/null @@ -1,305 +0,0 @@ -// Copyright 2013 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package handlers - -import ( - "bytes" - "net" - "net/http" - "net/http/httptest" - "net/url" - "strings" - "testing" - "time" -) - -const ( - ok = "ok\n" - notAllowed = "Method not allowed\n" -) - -var okHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte(ok)) -}) - -func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - return req -} - -func TestMethodHandler(t *testing.T) { - tests := []struct { - req *http.Request - handler http.Handler - code int - allow string // Contents of the Allow header - body string - }{ - // No handlers - {newRequest("GET", "/foo"), MethodHandler{}, http.StatusMethodNotAllowed, "", notAllowed}, - {newRequest("OPTIONS", "/foo"), MethodHandler{}, http.StatusOK, "", ""}, - - // A single handler - {newRequest("GET", "/foo"), MethodHandler{"GET": okHandler}, http.StatusOK, "", ok}, - {newRequest("POST", "/foo"), MethodHandler{"GET": okHandler}, http.StatusMethodNotAllowed, "GET", notAllowed}, - - // Multiple handlers - {newRequest("GET", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "", ok}, - {newRequest("POST", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "", ok}, - {newRequest("DELETE", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusMethodNotAllowed, "GET, POST", notAllowed}, - {newRequest("OPTIONS", "/foo"), MethodHandler{"GET": okHandler, "POST": okHandler}, http.StatusOK, "GET, POST", ""}, - - // Override OPTIONS - {newRequest("OPTIONS", "/foo"), MethodHandler{"OPTIONS": okHandler}, http.StatusOK, "", ok}, - } - - for i, test := range tests { - rec := httptest.NewRecorder() - test.handler.ServeHTTP(rec, test.req) - if rec.Code != test.code { - t.Fatalf("%d: wrong code, got %d want %d", i, rec.Code, test.code) - } - if allow := rec.HeaderMap.Get("Allow"); allow != test.allow { - t.Fatalf("%d: wrong Allow, got %s want %s", i, allow, test.allow) - } - if body := rec.Body.String(); body != test.body { - t.Fatalf("%d: wrong body, got %q want %q", i, body, 
test.body) - } - } -} - -func TestWriteLog(t *testing.T) { - loc, err := time.LoadLocation("Europe/Warsaw") - if err != nil { - panic(err) - } - ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc) - - // A typical request with an OK response - req := newRequest("GET", "http://example.com") - req.RemoteAddr = "192.168.100.5" - - buf := new(bytes.Buffer) - writeLog(buf, req, *req.URL, ts, http.StatusOK, 100) - log := buf.String() - - expected := "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } - - // Request with an unauthorized user - req = newRequest("GET", "http://example.com") - req.RemoteAddr = "192.168.100.5" - req.URL.User = url.User("kamil") - - buf.Reset() - writeLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500) - log = buf.String() - - expected = "192.168.100.5 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 401 500\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } - - // Request with url encoded parameters - req = newRequest("GET", "http://example.com/test?abc=hello%20world&a=b%3F") - req.RemoteAddr = "192.168.100.5" - - buf.Reset() - writeLog(buf, req, *req.URL, ts, http.StatusOK, 100) - log = buf.String() - - expected = "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET /test?abc=hello%20world&a=b%3F HTTP/1.1\" 200 100\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } -} - -func TestWriteCombinedLog(t *testing.T) { - loc, err := time.LoadLocation("Europe/Warsaw") - if err != nil { - panic(err) - } - ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc) - - // A typical request with an OK response - req := newRequest("GET", "http://example.com") - req.RemoteAddr = "192.168.100.5" - req.Header.Set("Referer", "http://example.com") - req.Header.Set( - "User-Agent", - "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.33 "+ - "(KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33", - ) - - buf := new(bytes.Buffer) - writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100) - log := buf.String() - - expected := "192.168.100.5 - - [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " + - "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " + - "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } - - // Request with an unauthorized user - req.URL.User = url.User("kamil") - - buf.Reset() - writeCombinedLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500) - log = buf.String() - - expected = "192.168.100.5 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 401 500 \"http://example.com\" " + - "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " + - "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } - - // Test with remote ipv6 address - req.RemoteAddr = "::1" - - buf.Reset() - writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100) - log = buf.String() - - expected = "::1 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " + - "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " + - "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } - - // Test remote ipv6 addr, with port - req.RemoteAddr = 
net.JoinHostPort("::1", "65000") - - buf.Reset() - writeCombinedLog(buf, req, *req.URL, ts, http.StatusOK, 100) - log = buf.String() - - expected = "::1 - kamil [26/May/1983:03:30:45 +0200] \"GET / HTTP/1.1\" 200 100 \"http://example.com\" " + - "\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " + - "AppleWebKit/537.33 (KHTML, like Gecko) Chrome/27.0.1430.0 Safari/537.33\"\n" - if log != expected { - t.Fatalf("wrong log, got %q want %q", log, expected) - } -} - -func TestLogPathRewrites(t *testing.T) { - var buf bytes.Buffer - - handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - req.URL.Path = "/" // simulate http.StripPrefix and friends - w.WriteHeader(200) - }) - logger := LoggingHandler(&buf, handler) - - logger.ServeHTTP(httptest.NewRecorder(), newRequest("GET", "/subdir/asdf")) - - if !strings.Contains(buf.String(), "GET /subdir/asdf HTTP") { - t.Fatalf("Got log %#v, wanted substring %#v", buf.String(), "GET /subdir/asdf HTTP") - } -} - -func BenchmarkWriteLog(b *testing.B) { - loc, err := time.LoadLocation("Europe/Warsaw") - if err != nil { - b.Fatalf(err.Error()) - } - ts := time.Date(1983, 05, 26, 3, 30, 45, 0, loc) - - req := newRequest("GET", "http://example.com") - req.RemoteAddr = "192.168.100.5" - - b.ResetTimer() - - buf := &bytes.Buffer{} - for i := 0; i < b.N; i++ { - buf.Reset() - writeLog(buf, req, *req.URL, ts, http.StatusUnauthorized, 500) - } -} - -func TestContentTypeHandler(t *testing.T) { - tests := []struct { - Method string - AllowContentTypes []string - ContentType string - Code int - }{ - {"POST", []string{"application/json"}, "application/json", http.StatusOK}, - {"POST", []string{"application/json", "application/xml"}, "application/json", http.StatusOK}, - {"POST", []string{"application/json"}, "application/json; charset=utf-8", http.StatusOK}, - {"POST", []string{"application/json"}, "application/json+xxx", http.StatusUnsupportedMediaType}, - {"POST", []string{"application/json"}, "text/plain", http.StatusUnsupportedMediaType}, - {"GET", []string{"application/json"}, "", http.StatusOK}, - {"GET", []string{}, "", http.StatusOK}, - } - for _, test := range tests { - r, err := http.NewRequest(test.Method, "/", nil) - if err != nil { - t.Error(err) - continue - } - - h := ContentTypeHandler(okHandler, test.AllowContentTypes...) 
- r.Header.Set("Content-Type", test.ContentType) - w := httptest.NewRecorder() - h.ServeHTTP(w, r) - if w.Code != test.Code { - t.Errorf("expected %d, got %d", test.Code, w.Code) - } - } -} - -func TestHTTPMethodOverride(t *testing.T) { - var tests = []struct { - Method string - OverrideMethod string - ExpectedMethod string - }{ - {"POST", "PUT", "PUT"}, - {"POST", "PATCH", "PATCH"}, - {"POST", "DELETE", "DELETE"}, - {"PUT", "DELETE", "PUT"}, - {"GET", "GET", "GET"}, - {"HEAD", "HEAD", "HEAD"}, - {"GET", "PUT", "GET"}, - {"HEAD", "DELETE", "HEAD"}, - } - - for _, test := range tests { - h := HTTPMethodOverrideHandler(okHandler) - reqs := make([]*http.Request, 0, 2) - - rHeader, err := http.NewRequest(test.Method, "/", nil) - if err != nil { - t.Error(err) - } - rHeader.Header.Set(HTTPMethodOverrideHeader, test.OverrideMethod) - reqs = append(reqs, rHeader) - - f := url.Values{HTTPMethodOverrideFormKey: []string{test.OverrideMethod}} - rForm, err := http.NewRequest(test.Method, "/", strings.NewReader(f.Encode())) - if err != nil { - t.Error(err) - } - rForm.Header.Set("Content-Type", "application/x-www-form-urlencoded") - reqs = append(reqs, rForm) - - for _, r := range reqs { - w := httptest.NewRecorder() - h.ServeHTTP(w, r) - if r.Method != test.ExpectedMethod { - t.Errorf("Expected %s, got %s", test.ExpectedMethod, r.Method) - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/proxy_headers.go b/Godeps/_workspace/src/github.com/gorilla/handlers/proxy_headers.go deleted file mode 100644 index 268de9c6a..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/proxy_headers.go +++ /dev/null @@ -1,113 +0,0 @@ -package handlers - -import ( - "net/http" - "regexp" - "strings" -) - -var ( - // De-facto standard header keys. - xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") - xRealIP = http.CanonicalHeaderKey("X-Real-IP") - xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Scheme") -) - -var ( - // RFC7239 defines a new "Forwarded: " header designed to replace the - // existing use of X-Forwarded-* headers. - // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43 - forwarded = http.CanonicalHeaderKey("Forwarded") - // Allows for a sub-match of the first value after 'for=' to the next - // comma, semi-colon or space. The match is case-insensitive. - forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`) - // Allows for a sub-match for the first instance of scheme (http|https) - // prefixed by 'proto='. The match is case-insensitive. - protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`) -) - -// ProxyHeaders inspects common reverse proxy headers and sets the corresponding -// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP -// for the remote (client) IP address, X-Forwarded-Proto for the scheme -// (http|https) and the RFC7239 Forwarded header, which may include both client -// IPs and schemes. -// -// NOTE: This middleware should only be used when behind a reverse -// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are -// configured not to) strip these headers from client requests, or where these -// headers are accepted "as is" from a remote client (e.g. when Go is not behind -// a proxy), can manifest as a vulnerability if your application uses these -// headers for validating the 'trustworthiness' of a request. -func ProxyHeaders(h http.Handler) http.Handler { - fn := func(w http.ResponseWriter, r *http.Request) { - // Set the remote IP with the value passed from the proxy. 
- if fwd := getIP(r); fwd != "" { - r.RemoteAddr = fwd - } - - // Set the scheme (proto) with the value passed from the proxy. - if scheme := getScheme(r); scheme != "" { - r.URL.Scheme = scheme - } - - // Call the next handler in the chain. - h.ServeHTTP(w, r) - } - - return http.HandlerFunc(fn) -} - -// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239 -// Forwarded headers (in that order). -func getIP(r *http.Request) string { - var addr string - - if fwd := r.Header.Get(xForwardedFor); fwd != "" { - // Only grab the first (client) address. Note that '192.168.0.1, - // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after - // the first may represent forwarding proxies earlier in the chain. - s := strings.Index(fwd, ", ") - if s == -1 { - s = len(fwd) - } - addr = fwd[:s] - } else if fwd := r.Header.Get(xRealIP); fwd != "" { - // X-Real-IP should only contain one IP address (the client making the - // request). - addr = fwd - } else if fwd := r.Header.Get(forwarded); fwd != "" { - // match should contain at least two elements if the protocol was - // specified in the Forwarded header. The first element will always be - // the 'for=' capture, which we ignore. In the case of multiple IP - // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only - // extract the first, which should be the client IP. - if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { - // IPv6 addresses in Forwarded headers are quoted-strings. We strip - // these quotes. - addr = strings.Trim(match[1], `"`) - } - } - - return addr -} - -// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239 -// Forwarded headers (in that order). -func getScheme(r *http.Request) string { - var scheme string - - // Retrieve the scheme from X-Forwarded-Proto. - if proto := r.Header.Get(xForwardedProto); proto != "" { - scheme = strings.ToLower(proto) - } else if proto := r.Header.Get(forwarded); proto != "" { - // match should contain at least two elements if the protocol was - // specified in the Forwarded header. The first element will always be - // the 'proto=' capture, which we ignore. In the case of multiple proto - // parameters (invalid) we only extract the first. 
- if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 { - scheme = strings.ToLower(match[1]) - } - } - - return scheme -} diff --git a/Godeps/_workspace/src/github.com/gorilla/handlers/proxy_headers_test.go b/Godeps/_workspace/src/github.com/gorilla/handlers/proxy_headers_test.go deleted file mode 100644 index 85282ef7d..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/handlers/proxy_headers_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package handlers - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -type headerTable struct { - key string // header key - val string // header val - expected string // expected result -} - -func TestGetIP(t *testing.T) { - headers := []headerTable{ - {xForwardedFor, "8.8.8.8", "8.8.8.8"}, // Single address - {xForwardedFor, "8.8.8.8, 8.8.4.4", "8.8.8.8"}, // Multiple - {xForwardedFor, "[2001:db8:cafe::17]:4711", "[2001:db8:cafe::17]:4711"}, // IPv6 address - {xForwardedFor, "", ""}, // None - {xRealIP, "8.8.8.8", "8.8.8.8"}, // Single address - {xRealIP, "8.8.8.8, 8.8.4.4", "8.8.8.8, 8.8.4.4"}, // Multiple - {xRealIP, "[2001:db8:cafe::17]:4711", "[2001:db8:cafe::17]:4711"}, // IPv6 address - {xRealIP, "", ""}, // None - {forwarded, `for="_gazonk"`, "_gazonk"}, // Hostname - {forwarded, `For="[2001:db8:cafe::17]:4711`, `[2001:db8:cafe::17]:4711`}, // IPv6 address - {forwarded, `for=192.0.2.60;proto=http;by=203.0.113.43`, `192.0.2.60`}, // Multiple params - {forwarded, `for=192.0.2.43, for=198.51.100.17`, "192.0.2.43"}, // Multiple params - {forwarded, `for="workstation.local",for=198.51.100.17`, "workstation.local"}, // Hostname - } - - for _, v := range headers { - req := &http.Request{ - Header: http.Header{ - v.key: []string{v.val}, - }} - res := getIP(req) - if res != v.expected { - t.Fatalf("wrong header for %s: got %s want %s", v.key, res, - v.expected) - } - } -} - -func TestGetScheme(t *testing.T) { - headers := []headerTable{ - {xForwardedProto, "https", "https"}, - {xForwardedProto, "http", "http"}, - {xForwardedProto, "HTTP", "http"}, - {forwarded, `For="[2001:db8:cafe::17]:4711`, ""}, // No proto - {forwarded, `for=192.0.2.43, for=198.51.100.17;proto=https`, "https"}, // Multiple params before proto - {forwarded, `for=172.32.10.15; proto=https;by=127.0.0.1`, "https"}, // Space before proto - {forwarded, `for=192.0.2.60;proto=http;by=203.0.113.43`, "http"}, // Multiple params - } - - for _, v := range headers { - req := &http.Request{ - Header: http.Header{ - v.key: []string{v.val}, - }, - } - res := getScheme(req) - if res != v.expected { - t.Fatalf("wrong header for %s: got %s want %s", v.key, res, - v.expected) - } - } -} - -// Test the middleware end-to-end -func TestProxyHeaders(t *testing.T) { - rr := httptest.NewRecorder() - r := newRequest("GET", "/") - - r.Header.Set(xForwardedFor, "8.8.8.8") - r.Header.Set(xForwardedProto, "https") - - var addr string - var proto string - ProxyHeaders(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - addr = r.RemoteAddr - proto = r.URL.Scheme - })).ServeHTTP(rr, r) - - if rr.Code != http.StatusOK { - t.Fatalf("bad status: got %d want %d", rr.Code, http.StatusOK) - } - - if addr != r.Header.Get(xForwardedFor) { - t.Fatalf("wrong address: got %s want %s", addr, - r.Header.Get(xForwardedFor)) - } - - if proto != r.Header.Get(xForwardedProto) { - t.Fatalf("wrong address: got %s want %s", proto, - r.Header.Get(xForwardedProto)) - } - -} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml 
b/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml deleted file mode 100644 index d87d46576..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - tip diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE b/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE deleted file mode 100644 index 0e5fb8728..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 Rodrigo Moraes. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/README.md b/Godeps/_workspace/src/github.com/gorilla/mux/README.md deleted file mode 100644 index e60301b03..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/README.md +++ /dev/null @@ -1,7 +0,0 @@ -mux -=== -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -gorilla/mux is a powerful URL router and dispatcher. - -Read the full documentation here: http://www.gorillatoolkit.org/pkg/mux diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go deleted file mode 100644 index c5f97b2b2..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/bench_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package mux - -import ( - "net/http" - "testing" -) - -func BenchmarkMux(b *testing.B) { - router := new(Router) - handler := func(w http.ResponseWriter, r *http.Request) {} - router.HandleFunc("/v1/{v1}", handler) - - request, _ := http.NewRequest("GET", "/v1/anything", nil) - for i := 0; i < b.N; i++ { - router.ServeHTTP(nil, request) - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/doc.go b/Godeps/_workspace/src/github.com/gorilla/mux/doc.go deleted file mode 100644 index 442babab8..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/doc.go +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. - -Let's start registering a couple of URL paths and handlers: - - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } - -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. - -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. For example: - - r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) - -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): - - vars := mux.Vars(request) - category := vars["category"] - -And this is all you need to know about the basic usage. More advanced options -are explained below. - -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: - - r := mux.NewRouter() - // Only matches if domain is "www.domain.com". - r.Host("www.domain.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") - -There are several other matchers that can be added. 
To match path prefixes: - - r.PathPrefix("/products/") - -...or HTTP methods: - - r.Methods("GET", "POST") - -...or URL schemes: - - r.Schemes("https") - -...or header values: - - r.Headers("X-Requested-With", "XMLHttpRequest") - -...or query values: - - r.Queries("key", "value") - -...or to use a custom matcher function: - - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) - -...and finally, it is possible to combine several matchers in a single route: - - r.HandleFunc("/products", ProductsHandler). - Host("www.domain.com"). - Methods("GET"). - Schemes("http") - -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". - -For example, let's say we have several URLs that should only match when the -host is "www.domain.com". Create a route for that host and get a "subrouter" -from it: - - r := mux.NewRouter() - s := r.Host("www.domain.com").Subrouter() - -Then register routes in the subrouter: - - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) - -The three URL paths we registered above will only be tested if the domain is -"www.domain.com", because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. - -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. - -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: - - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) - -Now let's see how to build registered URLs. - -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: - - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") - -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: - - url, err := r.Get("article").URL("category", "technology", "id", "42") - -...and the result will be a url.URL with the following path: - - "/articles/technology/42" - -This also works for host variables: - - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). - Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") - -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. - -Regex support also exists for matching Headers within a route. 
For example, we could do: - - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` - -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: - - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") - - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") - -And if you use subrouters, host and path defined separately can be built -as well: - - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") -*/ -package mux diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux.go deleted file mode 100644 index e25323092..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/mux.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "path" - "regexp" - - "github.com/gorilla/context" -) - -// NewRouter returns a new router instance. -func NewRouter() *Router { - return &Router{namedRoutes: make(map[string]*Route), KeepContext: false} -} - -// Router registers routes to be matched and dispatches a handler. -// -// It implements the http.Handler interface, so it can be registered to serve -// requests: -// -// var router = mux.NewRouter() -// -// func main() { -// http.Handle("/", router) -// } -// -// Or, for Google App Engine, register it in a init() function: -// -// func init() { -// http.Handle("/", router) -// } -// -// This will send all incoming requests to the router. -type Router struct { - // Configurable Handler to be used when no route matches. - NotFoundHandler http.Handler - // Parent route, if this is a subrouter. - parent parentRoute - // Routes to be matched, in order. - routes []*Route - // Routes by name for URL building. - namedRoutes map[string]*Route - // See Router.StrictSlash(). This defines the flag for new routes. - strictSlash bool - // If true, do not clear the request context after handling the request - KeepContext bool -} - -// Match matches registered routes against the request. -func (r *Router) Match(req *http.Request, match *RouteMatch) bool { - for _, route := range r.routes { - if route.Match(req, match) { - return true - } - } - return false -} - -// ServeHTTP dispatches the handler registered in the matched route. -// -// When there is a match, the route variables can be retrieved calling -// mux.Vars(request). -func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { - // Clean path to canonical form and redirect. - if p := cleanPath(req.URL.Path); p != req.URL.Path { - - // Added 3 lines (Philip Schlump) - It was droping the query string and #whatever from query. - // This matches with fix in go 1.2 r.c. 4 for same problem. 
Go Issue: - // http://code.google.com/p/go/issues/detail?id=5252 - url := *req.URL - url.Path = p - p = url.String() - - w.Header().Set("Location", p) - w.WriteHeader(http.StatusMovedPermanently) - return - } - var match RouteMatch - var handler http.Handler - if r.Match(req, &match) { - handler = match.Handler - setVars(req, match.Vars) - setCurrentRoute(req, match.Route) - } - if handler == nil { - handler = r.NotFoundHandler - if handler == nil { - handler = http.NotFoundHandler() - } - } - if !r.KeepContext { - defer context.Clear(req) - } - handler.ServeHTTP(w, req) -} - -// Get returns a route registered with the given name. -func (r *Router) Get(name string) *Route { - return r.getNamedRoutes()[name] -} - -// GetRoute returns a route registered with the given name. This method -// was renamed to Get() and remains here for backwards compatibility. -func (r *Router) GetRoute(name string) *Route { - return r.getNamedRoutes()[name] -} - -// StrictSlash defines the trailing slash behavior for new routes. The initial -// value is false. -// -// When true, if the route path is "/path/", accessing "/path" will redirect -// to the former and vice versa. In other words, your application will always -// see the path as specified in the route. -// -// When false, if the route path is "/path", accessing "/path/" will not match -// this route and vice versa. -// -// Special case: when a route sets a path prefix using the PathPrefix() method, -// strict slash is ignored for that route because the redirect behavior can't -// be determined from a prefix alone. However, any subrouters created from that -// route inherit the original StrictSlash setting. -func (r *Router) StrictSlash(value bool) *Router { - r.strictSlash = value - return r -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// getNamedRoutes returns the map where named routes are registered. -func (r *Router) getNamedRoutes() map[string]*Route { - if r.namedRoutes == nil { - if r.parent != nil { - r.namedRoutes = r.parent.getNamedRoutes() - } else { - r.namedRoutes = make(map[string]*Route) - } - } - return r.namedRoutes -} - -// getRegexpGroup returns regexp definitions from the parent route, if any. -func (r *Router) getRegexpGroup() *routeRegexpGroup { - if r.parent != nil { - return r.parent.getRegexpGroup() - } - return nil -} - -func (r *Router) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - return m -} - -// ---------------------------------------------------------------------------- -// Route factories -// ---------------------------------------------------------------------------- - -// NewRoute registers an empty route. -func (r *Router) NewRoute() *Route { - route := &Route{parent: r, strictSlash: r.strictSlash} - r.routes = append(r.routes, route) - return route -} - -// Handle registers a new route with a matcher for the URL path. -// See Route.Path() and Route.Handler(). -func (r *Router) Handle(path string, handler http.Handler) *Route { - return r.NewRoute().Path(path).Handler(handler) -} - -// HandleFunc registers a new route with a matcher for the URL path. -// See Route.Path() and Route.HandlerFunc(). -func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, - *http.Request)) *Route { - return r.NewRoute().Path(path).HandlerFunc(f) -} - -// Headers registers a new route with a matcher for request header values. 
-// See Route.Headers(). -func (r *Router) Headers(pairs ...string) *Route { - return r.NewRoute().Headers(pairs...) -} - -// Host registers a new route with a matcher for the URL host. -// See Route.Host(). -func (r *Router) Host(tpl string) *Route { - return r.NewRoute().Host(tpl) -} - -// MatcherFunc registers a new route with a custom matcher function. -// See Route.MatcherFunc(). -func (r *Router) MatcherFunc(f MatcherFunc) *Route { - return r.NewRoute().MatcherFunc(f) -} - -// Methods registers a new route with a matcher for HTTP methods. -// See Route.Methods(). -func (r *Router) Methods(methods ...string) *Route { - return r.NewRoute().Methods(methods...) -} - -// Path registers a new route with a matcher for the URL path. -// See Route.Path(). -func (r *Router) Path(tpl string) *Route { - return r.NewRoute().Path(tpl) -} - -// PathPrefix registers a new route with a matcher for the URL path prefix. -// See Route.PathPrefix(). -func (r *Router) PathPrefix(tpl string) *Route { - return r.NewRoute().PathPrefix(tpl) -} - -// Queries registers a new route with a matcher for URL query values. -// See Route.Queries(). -func (r *Router) Queries(pairs ...string) *Route { - return r.NewRoute().Queries(pairs...) -} - -// Schemes registers a new route with a matcher for URL schemes. -// See Route.Schemes(). -func (r *Router) Schemes(schemes ...string) *Route { - return r.NewRoute().Schemes(schemes...) -} - -// BuildVars registers a new route with a custom function for modifying -// route variables before building a URL. -func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { - return r.NewRoute().BuildVarsFunc(f) -} - -// Walk walks the router and all its sub-routers, calling walkFn for each route -// in the tree. The routes are walked in the order they were added. Sub-routers -// are explored depth-first. -func (r *Router) Walk(walkFn WalkFunc) error { - return r.walk(walkFn, []*Route{}) -} - -// SkipRouter is used as a return value from WalkFuncs to indicate that the -// router that walk is about to descend down to should be skipped. -var SkipRouter = errors.New("skip this router") - -// WalkFunc is the type of the function called for each route visited by Walk. -// At every invocation, it is given the current route, and the current router, -// and a list of ancestor routes that lead to the current route. -type WalkFunc func(route *Route, router *Router, ancestors []*Route) error - -func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { - for _, t := range r.routes { - if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" { - continue - } - - err := walkFn(t, r, ancestors) - if err == SkipRouter { - continue - } - for _, sr := range t.matchers { - if h, ok := sr.(*Router); ok { - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - } - } - if h, ok := t.handler.(*Router); ok { - ancestors = append(ancestors, t) - err := h.walk(walkFn, ancestors) - if err != nil { - return err - } - ancestors = ancestors[:len(ancestors)-1] - } - } - return nil -} - -// ---------------------------------------------------------------------------- -// Context -// ---------------------------------------------------------------------------- - -// RouteMatch stores information about a matched route. -type RouteMatch struct { - Route *Route - Handler http.Handler - Vars map[string]string -} - -type contextKey int - -const ( - varsKey contextKey = iota - routeKey -) - -// Vars returns the route variables for the current request, if any. 
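// Illustrative sketch, not part of the removed vendored file: a complete
// program tying together the Router, HandleFunc and Vars APIs defined in this
// file. The route pattern and port are hypothetical placeholders.
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()

	// {id:[0-9]+} only matches numeric ids; matched values are exposed to the
	// handler through mux.Vars, as described in the package documentation.
	r.HandleFunc("/articles/{category}/{id:[0-9]+}", func(w http.ResponseWriter, req *http.Request) {
		vars := mux.Vars(req)
		fmt.Fprintf(w, "category=%s id=%s\n", vars["category"], vars["id"])
	})

	log.Fatal(http.ListenAndServe(":8080", r))
}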
-func Vars(r *http.Request) map[string]string { - if rv := context.Get(r, varsKey); rv != nil { - return rv.(map[string]string) - } - return nil -} - -// CurrentRoute returns the matched route for the current request, if any. -func CurrentRoute(r *http.Request) *Route { - if rv := context.Get(r, routeKey); rv != nil { - return rv.(*Route) - } - return nil -} - -func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) -} - -func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -// cleanPath returns the canonical path for p, eliminating . and .. elements. -// Borrowed from the net/http package. -func cleanPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root; - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} - -// uniqueVars returns an error if two slices contain duplicated strings. -func uniqueVars(s1, s2 []string) error { - for _, v1 := range s1 { - for _, v2 := range s2 { - if v1 == v2 { - return fmt.Errorf("mux: duplicated route variable %q", v2) - } - } - } - return nil -} - -func checkPairs(pairs ...string) (int, error) { - length := len(pairs) - if length%2 != 0 { - return length, fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - } - return length, nil -} - -// mapFromPairs converts variadic string parameters to a string map. -func mapFromPairsToString(pairs ...string) (map[string]string, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]string, length/2) - for i := 0; i < length; i += 2 { - m[pairs[i]] = pairs[i+1] - } - return m, nil -} - -func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { - length, err := checkPairs(pairs...) - if err != nil { - return nil, err - } - m := make(map[string]*regexp.Regexp, length/2) - for i := 0; i < length; i += 2 { - regex, err := regexp.Compile(pairs[i+1]) - if err != nil { - return nil, err - } - m[pairs[i]] = regex - } - return m, nil -} - -// matchInArray returns true if the given string value is in the array. -func matchInArray(arr []string, value string) bool { - for _, v := range arr { - if v == value { - return true - } - } - return false -} - -// matchMapWithString returns true if the given key/value pairs exist in a given map. -func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. - if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != "" { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v == value { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} - -// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against -// the given regex -func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { - for k, v := range toCheck { - // Check if key exists. 
- if canonicalKey { - k = http.CanonicalHeaderKey(k) - } - if values := toMatch[k]; values == nil { - return false - } else if v != nil { - // If value was defined as an empty string we only check that the - // key exists. Otherwise we also check for equality. - valueExists := false - for _, value := range values { - if v.MatchString(value) { - valueExists = true - break - } - } - if !valueExists { - return false - } - } - } - return true -} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go deleted file mode 100644 index ba47727c5..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/mux_test.go +++ /dev/null @@ -1,1195 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "fmt" - "net/http" - "testing" - - "github.com/gorilla/context" -) - -type routeTest struct { - title string // title of the test - route *Route // the route being tested - request *http.Request // a request to test the route - vars map[string]string // the expected vars of the match - host string // the expected host of the match - path string // the expected path of the match - shouldMatch bool // whether the request is expected to match the route at all - shouldRedirect bool // whether the request should result in a redirect -} - -func TestHost(t *testing.T) { - // newRequestHost a new request with a method, url, and host header - newRequestHost := func(method, url, host string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - req.Host = host - return req - } - - tests := []routeTest{ - { - title: "Host route match", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with port, match", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:1234/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: true, - }, - { - title: "Host route with port, wrong port in request URL", - route: new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequest("GET", "http://aaa.bbb.ccc:9999/111/222/333"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route, match with host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route, wrong host in request header", - route: new(Route).Host("aaa.bbb.ccc"), - request: newRequestHost("GET", "/111/222/333", "aaa.222.ccc"), - vars: map[string]string{}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - // BUG {new(Route).Host("aaa.bbb.ccc:1234"), newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:1234"), map[string]string{}, "aaa.bbb.ccc:1234", "", true}, - { - title: "Host route with port, wrong host in request header", - route: 
new(Route).Host("aaa.bbb.ccc:1234"), - request: newRequestHost("GET", "/111/222/333", "aaa.bbb.ccc:9999"), - vars: map[string]string{}, - host: "aaa.bbb.ccc:1234", - path: "", - shouldMatch: false, - }, - { - title: "Host route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with pattern, wrong host in request URL", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Host route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: true, - }, - { - title: "Host route with multiple patterns, wrong host in request URL", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc"}, - host: "aaa.bbb.ccc", - path: "", - shouldMatch: false, - }, - { - title: "Path route with single pattern with pipe, match", - route: new(Route).Path("/{category:a|b/c}"), - request: newRequest("GET", "http://localhost/a"), - vars: map[string]string{"category": "a"}, - host: "", - path: "/a", - shouldMatch: true, - }, - { - title: "Path route with single pattern with pipe, match", - route: new(Route).Path("/{category:a|b/c}"), - request: newRequest("GET", "http://localhost/b/c"), - vars: map[string]string{"category": "b/c"}, - host: "", - path: "/b/c", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/a/product_name/1"), - vars: map[string]string{"category": "a", "product": "product_name", "id": "1"}, - host: "", - path: "/a/product_name/1", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns with pipe, match", - route: new(Route).Path("/{category:a|b/c}/{product}/{id:[0-9]+}"), - request: newRequest("GET", "http://localhost/b/c/product_name/1"), - vars: map[string]string{"category": "b/c", "product": "product_name", "id": "1"}, - host: "", - path: "/b/c/product_name/1", - shouldMatch: true, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPath(t *testing.T) { - tests := []routeTest{ - { - title: "Path route, match", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route, match with trailing slash in request and path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - }, - { - title: "Path route, do not match with trailing slash in path", - route: new(Route).Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "Path route, do not match with trailing slash in request", 
- route: new(Route).Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: false, - }, - { - title: "Path route, wrong path in request in request URL", - route: new(Route).Path("/111/222/333"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with pattern, match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with pattern, URL in request does not match", - route: new(Route).Path("/111/{v1:[0-9]{3}}/333"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Path route with multiple patterns, match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Path route with multiple patterns, URL in request does not match", - route: new(Route).Path("/{v1:[0-9]{3}}/{v2:[0-9]{3}}/{v3:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222", "v3": "333"}, - host: "", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestPathPrefix(t *testing.T) { - tests := []routeTest{ - { - title: "PathPrefix route, match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - }, - { - title: "PathPrefix route, match substring", - route: new(Route).PathPrefix("/1"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{}, - host: "", - path: "/1", - shouldMatch: true, - }, - { - title: "PathPrefix route, URL prefix in request does not match", - route: new(Route).PathPrefix("/111"), - request: newRequest("GET", "http://localhost/1/2/3"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: false, - }, - { - title: "PathPrefix route with pattern, match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with pattern, URL prefix in request does not match", - route: new(Route).PathPrefix("/111/{v1:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - { - title: "PathPrefix route with multiple patterns, match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", "http://localhost/111/222/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: true, - }, - { - title: "PathPrefix route with multiple patterns, URL prefix in request does not match", - route: new(Route).PathPrefix("/{v1:[0-9]{3}}/{v2:[0-9]{3}}"), - request: newRequest("GET", 
"http://localhost/111/aaa/333"), - vars: map[string]string{"v1": "111", "v2": "222"}, - host: "", - path: "/111/222", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHostPath(t *testing.T) { - tests := []routeTest{ - { - title: "Host and Path route, match", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Host and Path route, wrong host in request URL", - route: new(Route).Host("aaa.bbb.ccc").Path("/111/222/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Host and Path route with pattern, match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with pattern, URL in request does not match", - route: new(Route).Host("aaa.{v1:[a-z]{3}}.ccc").Path("/111/{v2:[0-9]{3}}/333"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "bbb", "v2": "222"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - { - title: "Host and Path route with multiple patterns, match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.bbb.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: true, - }, - { - title: "Host and Path route with multiple patterns, URL in request does not match", - route: new(Route).Host("{v1:[a-z]{3}}.{v2:[a-z]{3}}.{v3:[a-z]{3}}").Path("/{v4:[0-9]{3}}/{v5:[0-9]{3}}/{v6:[0-9]{3}}"), - request: newRequest("GET", "http://aaa.222.ccc/111/222/333"), - vars: map[string]string{"v1": "aaa", "v2": "bbb", "v3": "ccc", "v4": "111", "v5": "222", "v6": "333"}, - host: "aaa.bbb.ccc", - path: "/111/222/333", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestHeaders(t *testing.T) { - // newRequestHeaders creates a new request with a method, url, and headers - newRequestHeaders := func(method, url string, headers map[string]string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - for k, v := range headers { - req.Header.Add(k, v) - } - return req - } - - tests := []routeTest{ - { - title: "Headers route, match", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "ding"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Headers route, bad header values", - route: new(Route).Headers("foo", "bar", "baz", "ding"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "bar", "baz": "dong"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Headers route, regex header values to match", - route: new(Route).Headers("foo", "ba[zr]"), - request: newRequestHeaders("GET", "http://localhost", 
map[string]string{"foo": "bar"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Headers route, regex header values to match", - route: new(Route).HeadersRegexp("foo", "ba[zr]"), - request: newRequestHeaders("GET", "http://localhost", map[string]string{"foo": "baz"}), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - } - -} - -func TestMethods(t *testing.T) { - tests := []routeTest{ - { - title: "Methods route, match GET", - route: new(Route).Methods("GET", "POST"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, match POST", - route: new(Route).Methods("GET", "POST"), - request: newRequest("POST", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Methods route, bad method", - route: new(Route).Methods("GET", "POST"), - request: newRequest("PUT", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestQueries(t *testing.T) { - tests := []routeTest{ - { - title: "Queries route, match", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?foo=bar&baz=ding"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, match with a query string out of order", - route: new(Route).Host("www.example.com").Path("/api").Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://www.example.com/api?baz=ding&foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route, bad query", - route: new(Route).Queries("foo", "bar", "baz", "ding"), - request: newRequest("GET", "http://localhost?foo=bar&baz=dong"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with pattern, match", - route: new(Route).Queries("foo", "{v1}"), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{"v1": "bar"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with multiple patterns, match", - route: new(Route).Queries("foo", "{v1}", "baz", "{v2}"), - request: newRequest("GET", "http://localhost?foo=bar&baz=ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=10"), - vars: map[string]string{"v1": "10"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]+}"), - request: newRequest("GET", "http://localhost?foo=a"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with regexp pattern with quantifier, match", - route: 
new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=1"), - vars: map[string]string{"v1": "1"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, additional variable in query string, match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?bar=2&foo=1"), - vars: map[string]string{"v1": "1"}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with regexp pattern with quantifier, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=12"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with regexp pattern with quantifier, additional variable in query string, regexp does not match", - route: new(Route).Queries("foo", "{v1:[0-9]{1}}"), - request: newRequest("GET", "http://localhost?foo=12"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with empty value, should match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost?foo=bar"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with empty value and no parameter in request, should not match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with empty value and empty parameter in request, should match", - route: new(Route).Queries("foo", ""), - request: newRequest("GET", "http://localhost?foo="), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Queries route with overlapping value, should not match", - route: new(Route).Queries("foo", "bar"), - request: newRequest("GET", "http://localhost?foo=barfoo"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with no parameter in request, should not match", - route: new(Route).Queries("foo", "{bar}"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - { - title: "Queries route with empty parameter in request, should match", - route: new(Route).Queries("foo", "{bar}"), - request: newRequest("GET", "http://localhost?foo="), - vars: map[string]string{"foo": ""}, - host: "", - path: "", - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSchemes(t *testing.T) { - tests := []routeTest{ - // Schemes - { - title: "Schemes route, match https", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "https://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, match ftp", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "ftp://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "Schemes route, bad scheme", - route: new(Route).Schemes("https", "ftp"), - request: newRequest("GET", "http://localhost"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - for _, test := range tests { - testRoute(t, test) - } -} - -func TestMatcherFunc(t *testing.T) { - m := func(r *http.Request, m 
*RouteMatch) bool { - if r.URL.Host == "aaa.bbb.ccc" { - return true - } - return false - } - - tests := []routeTest{ - { - title: "MatchFunc route, match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.bbb.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: true, - }, - { - title: "MatchFunc route, non-match", - route: new(Route).MatcherFunc(m), - request: newRequest("GET", "http://aaa.222.ccc"), - vars: map[string]string{}, - host: "", - path: "", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestBuildVarsFunc(t *testing.T) { - tests := []routeTest{ - { - title: "BuildVarsFunc set on route", - route: new(Route).Path(`/111/{v1:\d}{v2:.*}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v1"] = "3" - vars["v2"] = "a" - return vars - }), - request: newRequest("GET", "http://localhost/111/2"), - path: "/111/3a", - shouldMatch: true, - }, - { - title: "BuildVarsFunc set on route and parent route", - route: new(Route).PathPrefix(`/{v1:\d}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v1"] = "2" - return vars - }).Subrouter().Path(`/{v2:\w}`).BuildVarsFunc(func(vars map[string]string) map[string]string { - vars["v2"] = "b" - return vars - }), - request: newRequest("GET", "http://localhost/1/a"), - path: "/2/b", - shouldMatch: true, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestSubRouter(t *testing.T) { - subrouter1 := new(Route).Host("{v1:[a-z]+}.google.com").Subrouter() - subrouter2 := new(Route).PathPrefix("/foo/{v1}").Subrouter() - - tests := []routeTest{ - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://aaa.google.com/bbb"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: true, - }, - { - route: subrouter1.Path("/{v2:[a-z]+}"), - request: newRequest("GET", "http://111.google.com/111"), - vars: map[string]string{"v1": "aaa", "v2": "bbb"}, - host: "aaa.google.com", - path: "/bbb", - shouldMatch: false, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar/baz/ding"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: true, - }, - { - route: subrouter2.Path("/baz/{v2}"), - request: newRequest("GET", "http://localhost/foo/bar"), - vars: map[string]string{"v1": "bar", "v2": "ding"}, - host: "", - path: "/foo/bar/baz/ding", - shouldMatch: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestNamedRoutes(t *testing.T) { - r1 := NewRouter() - r1.NewRoute().Name("a") - r1.NewRoute().Name("b") - r1.NewRoute().Name("c") - - r2 := r1.NewRoute().Subrouter() - r2.NewRoute().Name("d") - r2.NewRoute().Name("e") - r2.NewRoute().Name("f") - - r3 := r2.NewRoute().Subrouter() - r3.NewRoute().Name("g") - r3.NewRoute().Name("h") - r3.NewRoute().Name("i") - - if r1.namedRoutes == nil || len(r1.namedRoutes) != 9 { - t.Errorf("Expected 9 named routes, got %v", r1.namedRoutes) - } else if r1.Get("i") == nil { - t.Errorf("Subroute name not registered") - } -} - -func TestStrictSlash(t *testing.T) { - r := NewRouter() - r.StrictSlash(true) - - tests := []routeTest{ - { - title: "Redirect path without slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: 
true, - }, - { - title: "Do not redirect path with slash", - route: r.NewRoute().Path("/111/"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111/", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Redirect path with slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111/"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Do not redirect path without slash", - route: r.NewRoute().Path("/111"), - request: newRequest("GET", "http://localhost/111"), - vars: map[string]string{}, - host: "", - path: "/111", - shouldMatch: true, - shouldRedirect: false, - }, - { - title: "Propagate StrictSlash to subrouters", - route: r.NewRoute().PathPrefix("/static/").Subrouter().Path("/images/"), - request: newRequest("GET", "http://localhost/static/images"), - vars: map[string]string{}, - host: "", - path: "/static/images/", - shouldMatch: true, - shouldRedirect: true, - }, - { - title: "Ignore StrictSlash for path prefix", - route: r.NewRoute().PathPrefix("/static/"), - request: newRequest("GET", "http://localhost/static/logo.png"), - vars: map[string]string{}, - host: "", - path: "/static/", - shouldMatch: true, - shouldRedirect: false, - }, - } - - for _, test := range tests { - testRoute(t, test) - } -} - -func TestWalkSingleDepth(t *testing.T) { - r0 := NewRouter() - r1 := NewRouter() - r2 := NewRouter() - - r0.Path("/g") - r0.Path("/o") - r0.Path("/d").Handler(r1) - r0.Path("/r").Handler(r2) - r0.Path("/a") - - r1.Path("/z") - r1.Path("/i") - r1.Path("/l") - r1.Path("/l") - - r2.Path("/i") - r2.Path("/l") - r2.Path("/l") - - paths := []string{"g", "o", "r", "i", "l", "l", "a"} - depths := []int{0, 0, 0, 1, 1, 1, 0} - i := 0 - err := r0.Walk(func(route *Route, router *Router, ancestors []*Route) error { - matcher := route.matchers[0].(*routeRegexp) - if matcher.template == "/d" { - return SkipRouter - } - if len(ancestors) != depths[i] { - t.Errorf(`Expected depth of %d at i = %d; got "%s"`, depths[i], i, len(ancestors)) - } - if matcher.template != "/"+paths[i] { - t.Errorf(`Expected "/%s" at i = %d; got "%s"`, paths[i], i, matcher.template) - } - i++ - return nil - }) - if err != nil { - panic(err) - } - if i != len(paths) { - t.Errorf("Expected %d routes, found %d", len(paths), i) - } -} - -func TestWalkNested(t *testing.T) { - router := NewRouter() - - g := router.Path("/g").Subrouter() - o := g.PathPrefix("/o").Subrouter() - r := o.PathPrefix("/r").Subrouter() - i := r.PathPrefix("/i").Subrouter() - l1 := i.PathPrefix("/l").Subrouter() - l2 := l1.PathPrefix("/l").Subrouter() - l2.Path("/a") - - paths := []string{"/g", "/g/o", "/g/o/r", "/g/o/r/i", "/g/o/r/i/l", "/g/o/r/i/l/l", "/g/o/r/i/l/l/a"} - idx := 0 - err := router.Walk(func(route *Route, router *Router, ancestors []*Route) error { - path := paths[idx] - tpl := route.regexp.path.template - if tpl != path { - t.Errorf(`Expected %s got %s`, path, tpl) - } - idx++ - return nil - }) - if err != nil { - panic(err) - } - if idx != len(paths) { - t.Errorf("Expected %d routes, found %d", len(paths), idx) - } -} - -// ---------------------------------------------------------------------------- -// Helpers -// ---------------------------------------------------------------------------- - -func getRouteTemplate(route *Route) string { - host, path := "none", "none" - if route.regexp != nil { - if route.regexp.host != nil { - host = route.regexp.host.template - } - if 
route.regexp.path != nil { - path = route.regexp.path.template - } - } - return fmt.Sprintf("Host: %v, Path: %v", host, path) -} - -func testRoute(t *testing.T, test routeTest) { - request := test.request - route := test.route - vars := test.vars - shouldMatch := test.shouldMatch - host := test.host - path := test.path - url := test.host + test.path - shouldRedirect := test.shouldRedirect - - var match RouteMatch - ok := route.Match(request, &match) - if ok != shouldMatch { - msg := "Should match" - if !shouldMatch { - msg = "Should not match" - } - t.Errorf("(%v) %v:\nRoute: %#v\nRequest: %#v\nVars: %v\n", test.title, msg, route, request, vars) - return - } - if shouldMatch { - if test.vars != nil && !stringMapEqual(test.vars, match.Vars) { - t.Errorf("(%v) Vars not equal: expected %v, got %v", test.title, vars, match.Vars) - return - } - if host != "" { - u, _ := test.route.URLHost(mapToPairs(match.Vars)...) - if host != u.Host { - t.Errorf("(%v) URLHost not equal: expected %v, got %v -- %v", test.title, host, u.Host, getRouteTemplate(route)) - return - } - } - if path != "" { - u, _ := route.URLPath(mapToPairs(match.Vars)...) - if path != u.Path { - t.Errorf("(%v) URLPath not equal: expected %v, got %v -- %v", test.title, path, u.Path, getRouteTemplate(route)) - return - } - } - if url != "" { - u, _ := route.URL(mapToPairs(match.Vars)...) - if url != u.Host+u.Path { - t.Errorf("(%v) URL not equal: expected %v, got %v -- %v", test.title, url, u.Host+u.Path, getRouteTemplate(route)) - return - } - } - if shouldRedirect && match.Handler == nil { - t.Errorf("(%v) Did not redirect", test.title) - return - } - if !shouldRedirect && match.Handler != nil { - t.Errorf("(%v) Unexpected redirect", test.title) - return - } - } -} - -// Tests that the context is cleared or not cleared properly depending on -// the configuration of the router -func TestKeepContext(t *testing.T) { - func1 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - res := new(http.ResponseWriter) - r.ServeHTTP(*res, req) - - if _, ok := context.GetOk(req, "t"); ok { - t.Error("Context should have been cleared at end of request") - } - - r.KeepContext = true - - req, _ = http.NewRequest("GET", "http://localhost/", nil) - context.Set(req, "t", 1) - - r.ServeHTTP(*res, req) - if _, ok := context.GetOk(req, "t"); !ok { - t.Error("Context should NOT have been cleared at end of request") - } - -} - -type TestA301ResponseWriter struct { - hh http.Header - status int -} - -func (ho TestA301ResponseWriter) Header() http.Header { - return http.Header(ho.hh) -} - -func (ho TestA301ResponseWriter) Write(b []byte) (int, error) { - return 0, nil -} - -func (ho TestA301ResponseWriter) WriteHeader(code int) { - ho.status = code -} - -func Test301Redirect(t *testing.T) { - m := make(http.Header) - - func1 := func(w http.ResponseWriter, r *http.Request) {} - func2 := func(w http.ResponseWriter, r *http.Request) {} - - r := NewRouter() - r.HandleFunc("/api/", func2).Name("func2") - r.HandleFunc("/", func1).Name("func1") - - req, _ := http.NewRequest("GET", "http://localhost//api/?abc=def", nil) - - res := TestA301ResponseWriter{ - hh: m, - status: 0, - } - r.ServeHTTP(&res, req) - - if "http://localhost/api/?abc=def" != res.hh["Location"][0] { - t.Errorf("Should have complete URL with query string") - } -} - -// https://plus.google.com/101022900381697718949/posts/eWy6DjFJ6uW -func 
TestSubrouterHeader(t *testing.T) { - expected := "func1 response" - func1 := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, expected) - } - func2 := func(http.ResponseWriter, *http.Request) {} - - r := NewRouter() - s := r.Headers("SomeSpecialHeader", "").Subrouter() - s.HandleFunc("/", func1).Name("func1") - r.HandleFunc("/", func2).Name("func2") - - req, _ := http.NewRequest("GET", "http://localhost/", nil) - req.Header.Add("SomeSpecialHeader", "foo") - match := new(RouteMatch) - matched := r.Match(req, match) - if !matched { - t.Errorf("Should match request") - } - if match.Route.GetName() != "func1" { - t.Errorf("Expecting func1 handler, got %s", match.Route.GetName()) - } - resp := NewRecorder() - match.Handler.ServeHTTP(resp, req) - if resp.Body.String() != expected { - t.Errorf("Expecting %q", expected) - } -} - -// mapToPairs converts a string map to a slice of string pairs -func mapToPairs(m map[string]string) []string { - var i int - p := make([]string, len(m)*2) - for k, v := range m { - p[i] = k - p[i+1] = v - i += 2 - } - return p -} - -// stringMapEqual checks the equality of two string maps -func stringMapEqual(m1, m2 map[string]string) bool { - nil1 := m1 == nil - nil2 := m2 == nil - if nil1 != nil2 || len(m1) != len(m2) { - return false - } - for k, v := range m1 { - if v != m2[k] { - return false - } - } - return true -} - -// newRequest is a helper function to create a new request with a method and url -func newRequest(method, url string) *http.Request { - req, err := http.NewRequest(method, url, nil) - if err != nil { - panic(err) - } - return req -} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go b/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go deleted file mode 100644 index 1f7c190c0..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/old_test.go +++ /dev/null @@ -1,714 +0,0 @@ -// Old tests ported to Go1. This is a mess. Want to drop it one day. - -// Copyright 2011 Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "net/http" - "testing" -) - -// ---------------------------------------------------------------------------- -// ResponseRecorder -// ---------------------------------------------------------------------------- -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// ResponseRecorder is an implementation of http.ResponseWriter that -// records its mutations for later inspection in tests. -type ResponseRecorder struct { - Code int // the HTTP response code from WriteHeader - HeaderMap http.Header // the HTTP response headers - Body *bytes.Buffer // if non-nil, the bytes.Buffer to append written data to - Flushed bool -} - -// NewRecorder returns an initialized ResponseRecorder. -func NewRecorder() *ResponseRecorder { - return &ResponseRecorder{ - HeaderMap: make(http.Header), - Body: new(bytes.Buffer), - } -} - -// DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. -const DefaultRemoteAddr = "1.2.3.4" - -// Header returns the response headers. -func (rw *ResponseRecorder) Header() http.Header { - return rw.HeaderMap -} - -// Write always succeeds and writes to rw.Body, if not nil. 
-func (rw *ResponseRecorder) Write(buf []byte) (int, error) { - if rw.Body != nil { - rw.Body.Write(buf) - } - if rw.Code == 0 { - rw.Code = http.StatusOK - } - return len(buf), nil -} - -// WriteHeader sets rw.Code. -func (rw *ResponseRecorder) WriteHeader(code int) { - rw.Code = code -} - -// Flush sets rw.Flushed to true. -func (rw *ResponseRecorder) Flush() { - rw.Flushed = true -} - -// ---------------------------------------------------------------------------- - -func TestRouteMatchers(t *testing.T) { - var scheme, host, path, query, method string - var headers map[string]string - var resultVars map[bool]map[string]string - - router := NewRouter() - router.NewRoute().Host("{var1}.google.com"). - Path("/{var2:[a-z]+}/{var3:[0-9]+}"). - Queries("foo", "bar"). - Methods("GET"). - Schemes("https"). - Headers("x-requested-with", "XMLHttpRequest") - router.NewRoute().Host("www.{var4}.com"). - PathPrefix("/foo/{var5:[a-z]+}/{var6:[0-9]+}"). - Queries("baz", "ding"). - Methods("POST"). - Schemes("http"). - Headers("Content-Type", "application/json") - - reset := func() { - // Everything match. - scheme = "https" - host = "www.google.com" - path = "/product/42" - query = "?foo=bar" - method = "GET" - headers = map[string]string{"X-Requested-With": "XMLHttpRequest"} - resultVars = map[bool]map[string]string{ - true: {"var1": "www", "var2": "product", "var3": "42"}, - false: {}, - } - } - - reset2 := func() { - // Everything match. - scheme = "http" - host = "www.google.com" - path = "/foo/product/42/path/that/is/ignored" - query = "?baz=ding" - method = "POST" - headers = map[string]string{"Content-Type": "application/json"} - resultVars = map[bool]map[string]string{ - true: {"var4": "google", "var5": "product", "var6": "42"}, - false: {}, - } - } - - match := func(shouldMatch bool) { - url := scheme + "://" + host + path + query - request, _ := http.NewRequest(method, url, nil) - for key, value := range headers { - request.Header.Add(key, value) - } - - var routeMatch RouteMatch - matched := router.Match(request, &routeMatch) - if matched != shouldMatch { - // Need better messages. :) - if matched { - t.Errorf("Should match.") - } else { - t.Errorf("Should not match.") - } - } - - if matched { - currentRoute := routeMatch.Route - if currentRoute == nil { - t.Errorf("Expected a current route.") - } - vars := routeMatch.Vars - expectedVars := resultVars[shouldMatch] - if len(vars) != len(expectedVars) { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - for name, value := range vars { - if expectedVars[name] != value { - t.Errorf("Expected vars: %v Got: %v.", expectedVars, vars) - } - } - } - } - - // 1st route -------------------------------------------------------------- - - // Everything match. - reset() - match(true) - - // Scheme doesn't match. - reset() - scheme = "http" - match(false) - - // Host doesn't match. - reset() - host = "www.mygoogle.com" - match(false) - - // Path doesn't match. - reset() - path = "/product/notdigits" - match(false) - - // Query doesn't match. - reset() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset() - method = "POST" - match(false) - - // Header doesn't match. - reset() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset() - match(true) - - // 2nd route -------------------------------------------------------------- - - // Everything match. - reset2() - match(true) - - // Scheme doesn't match. - reset2() - scheme = "https" - match(false) - - // Host doesn't match. 
- reset2() - host = "sub.google.com" - match(false) - - // Path doesn't match. - reset2() - path = "/bar/product/42" - match(false) - - // Query doesn't match. - reset2() - query = "?foo=baz" - match(false) - - // Method doesn't match. - reset2() - method = "GET" - match(false) - - // Header doesn't match. - reset2() - headers = map[string]string{} - match(false) - - // Everything match, again. - reset2() - match(true) -} - -type headerMatcherTest struct { - matcher headerMatcher - headers map[string]string - result bool -} - -var headerMatcherTests = []headerMatcherTest{ - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{"X-Requested-With": "XMLHttpRequest"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": ""}), - headers: map[string]string{"X-Requested-With": "anything"}, - result: true, - }, - { - matcher: headerMatcher(map[string]string{"x-requested-with": "XMLHttpRequest"}), - headers: map[string]string{}, - result: false, - }, -} - -type hostMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var hostMatcherTests = []hostMatcherTest{ - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://abc.def.ghi/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Host("{foo:[a-z][a-z][a-z]}.{bar:[a-z][a-z][a-z]}.{baz:[a-z][a-z][a-z]}"), - url: "http://a.b.c/", - vars: map[string]string{"foo": "abc", "bar": "def", "baz": "ghi"}, - result: false, - }, -} - -type methodMatcherTest struct { - matcher methodMatcher - method string - result bool -} - -var methodMatcherTests = []methodMatcherTest{ - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "GET", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "POST", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "PUT", - result: true, - }, - { - matcher: methodMatcher([]string{"GET", "POST", "PUT"}), - method: "DELETE", - result: false, - }, -} - -type pathMatcherTest struct { - matcher *Route - url string - vars map[string]string - result bool -} - -var pathMatcherTests = []pathMatcherTest{ - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/123/456/789", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: true, - }, - { - matcher: NewRouter().NewRoute().Path("/{foo:[0-9][0-9][0-9]}/{bar:[0-9][0-9][0-9]}/{baz:[0-9][0-9][0-9]}"), - url: "http://localhost:8080/1/2/3", - vars: map[string]string{"foo": "123", "bar": "456", "baz": "789"}, - result: false, - }, -} - -type schemeMatcherTest struct { - matcher schemeMatcher - url string - result bool -} - -var schemeMatcherTests = []schemeMatcherTest{ - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "http://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"http", "https"}), - url: "https://localhost:8080/", - result: true, - }, - { - matcher: schemeMatcher([]string{"https"}), - url: "http://localhost:8080/", - result: false, - }, - { - matcher: schemeMatcher([]string{"http"}), - url: "https://localhost:8080/", - result: false, - }, -} - -type urlBuildingTest struct { - route *Route - vars []string - url string -} - -var urlBuildingTests = 
[]urlBuildingTest{ - { - route: new(Route).Host("foo.domain.com"), - vars: []string{}, - url: "http://foo.domain.com", - }, - { - route: new(Route).Host("{subdomain}.domain.com"), - vars: []string{"subdomain", "bar"}, - url: "http://bar.domain.com", - }, - { - route: new(Route).Host("foo.domain.com").Path("/articles"), - vars: []string{}, - url: "http://foo.domain.com/articles", - }, - { - route: new(Route).Path("/articles"), - vars: []string{}, - url: "/articles", - }, - { - route: new(Route).Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"category", "technology", "id", "42"}, - url: "/articles/technology/42", - }, - { - route: new(Route).Host("{subdomain}.domain.com").Path("/articles/{category}/{id:[0-9]+}"), - vars: []string{"subdomain", "foo", "category", "technology", "id", "42"}, - url: "http://foo.domain.com/articles/technology/42", - }, -} - -func TestHeaderMatcher(t *testing.T) { - for _, v := range headerMatcherTests { - request, _ := http.NewRequest("GET", "http://localhost:8080/", nil) - for key, value := range v.headers { - request.Header.Add(key, value) - } - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, request.Header) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, request.Header) - } - } - } -} - -func TestHostMatcher(t *testing.T) { - for _, v := range hostMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestMethodMatcher(t *testing.T) { - for _, v := range methodMatcherTests { - request, _ := http.NewRequest(v.method, "http://localhost:8080/", nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.method) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.method) - } - } - } -} - -func TestPathMatcher(t *testing.T) { - for _, v := range pathMatcherTests { - request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - vars := routeMatch.Vars - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - if result { - if len(vars) != len(v.vars) { - t.Errorf("%#v: vars length should be %v, got %v.", v.matcher, len(v.vars), len(vars)) - } - for name, value := range vars { - if v.vars[name] != value { - t.Errorf("%#v: expected value %v for key %v, got %v.", v.matcher, v.vars[name], name, value) - } - } - } else { - if len(vars) != 0 { - t.Errorf("%#v: vars length should be 0, got %v.", v.matcher, len(vars)) - } - } - } -} - -func TestSchemeMatcher(t *testing.T) { - for _, v := range schemeMatcherTests { - 
request, _ := http.NewRequest("GET", v.url, nil) - var routeMatch RouteMatch - result := v.matcher.Match(request, &routeMatch) - if result != v.result { - if v.result { - t.Errorf("%#v: should match %v.", v.matcher, v.url) - } else { - t.Errorf("%#v: should not match %v.", v.matcher, v.url) - } - } - } -} - -func TestUrlBuilding(t *testing.T) { - - for _, v := range urlBuildingTests { - u, _ := v.route.URL(v.vars...) - url := u.String() - if url != v.url { - t.Errorf("expected %v, got %v", v.url, url) - /* - reversePath := "" - reverseHost := "" - if v.route.pathTemplate != nil { - reversePath = v.route.pathTemplate.Reverse - } - if v.route.hostTemplate != nil { - reverseHost = v.route.hostTemplate.Reverse - } - - t.Errorf("%#v:\nexpected: %q\ngot: %q\nreverse path: %q\nreverse host: %q", v.route, v.url, url, reversePath, reverseHost) - */ - } - } - - ArticleHandler := func(w http.ResponseWriter, r *http.Request) { - } - - router := NewRouter() - router.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).Name("article") - - url, _ := router.Get("article").URL("category", "technology", "id", "42") - expected := "/articles/technology/42" - if url.String() != expected { - t.Errorf("Expected %v, got %v", expected, url.String()) - } -} - -func TestMatchedRouteName(t *testing.T) { - routeName := "stock" - router := NewRouter() - route := router.NewRoute().Path("/products/").Name(routeName) - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - retName := rv.Route.GetName() - if retName != routeName { - t.Errorf("Expected %q, got %q.", routeName, retName) - } -} - -func TestSubRouting(t *testing.T) { - // Example from docs. - router := NewRouter() - subrouter := router.NewRoute().Host("www.domain.com").Subrouter() - route := subrouter.NewRoute().Path("/products/").Name("products") - - url := "http://www.domain.com/products/" - request, _ := http.NewRequest("GET", url, nil) - var rv RouteMatch - ok := router.Match(request, &rv) - - if !ok || rv.Route != route { - t.Errorf("Expected same route, got %+v.", rv.Route) - } - - u, _ := router.Get("products").URL() - builtUrl := u.String() - // Yay, subroute aware of the domain when building! 
- if builtUrl != url { - t.Errorf("Expected %q, got %q.", url, builtUrl) - } -} - -func TestVariableNames(t *testing.T) { - route := new(Route).Host("{arg1}.domain.com").Path("/{arg1}/{arg2:[0-9]+}") - if route.err == nil { - t.Errorf("Expected error for duplicated variable names") - } -} - -func TestRedirectSlash(t *testing.T) { - var route *Route - var routeMatch RouteMatch - r := NewRouter() - - r.StrictSlash(false) - route = r.NewRoute() - if route.strictSlash != false { - t.Errorf("Expected false redirectSlash.") - } - - r.StrictSlash(true) - route = r.NewRoute() - if route.strictSlash != true { - t.Errorf("Expected true redirectSlash.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}/") - request, _ := http.NewRequest("GET", "http://localhost/foo/123", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars := routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp := NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123/" { - t.Errorf("Expected redirect header.") - } - - route = new(Route) - route.strictSlash = true - route.Path("/{arg1}/{arg2:[0-9]+}") - request, _ = http.NewRequest("GET", "http://localhost/foo/123/", nil) - routeMatch = RouteMatch{} - _ = route.Match(request, &routeMatch) - vars = routeMatch.Vars - if vars["arg1"] != "foo" { - t.Errorf("Expected foo.") - } - if vars["arg2"] != "123" { - t.Errorf("Expected 123.") - } - rsp = NewRecorder() - routeMatch.Handler.ServeHTTP(rsp, request) - if rsp.HeaderMap.Get("Location") != "http://localhost/foo/123" { - t.Errorf("Expected redirect header.") - } -} - -// Test for the new regexp library, still not available in stable Go. -func TestNewRegexp(t *testing.T) { - var p *routeRegexp - var matches []string - - tests := map[string]map[string][]string{ - "/{foo:a{2}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": nil, - "/aaaa": nil, - }, - "/{foo:a{2,}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": {"aaaa"}, - }, - "/{foo:a{2,3}}": { - "/a": nil, - "/aa": {"aa"}, - "/aaa": {"aaa"}, - "/aaaa": nil, - }, - "/{foo:[a-z]{3}}/{bar:[a-z]{2}}": { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abcd": nil, - "/abc/ab": {"abc", "ab"}, - "/abc/abc": nil, - "/abcd/ab": nil, - }, - `/{foo:\w{3,}}/{bar:\d{2,}}`: { - "/a": nil, - "/ab": nil, - "/abc": nil, - "/abc/1": nil, - "/abc/12": {"abc", "12"}, - "/abcd/12": {"abcd", "12"}, - "/abcd/123": {"abcd", "123"}, - }, - } - - for pattern, paths := range tests { - p, _ = newRouteRegexp(pattern, false, false, false, false) - for path, result := range paths { - matches = p.regexp.FindStringSubmatch(path) - if result == nil { - if matches != nil { - t.Errorf("%v should not match %v.", pattern, path) - } - } else { - if len(matches) != len(result)+1 { - t.Errorf("Expected %v matches, got %v.", len(result)+1, len(matches)) - } else { - for k, v := range result { - if matches[k+1] != v { - t.Errorf("Expected %v, got %v.", v, matches[k+1]) - } - } - } - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go b/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go deleted file mode 100644 index 7c636d0ef..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/regexp.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "bytes" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// newRouteRegexp parses a route template and returns a routeRegexp, -// used to match a host, a path or a query string. -// -// It will extract named variables, assemble a regexp to be matched, create -// a "reverse" template to build URLs and compile regexps to validate variable -// values used in URL building. -// -// Previously we accepted only Python-like identifiers for variable -// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that -// name and pattern can't be empty, and names can't contain a colon. -func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) { - // Check if it is well-formed. - idxs, errBraces := braceIndices(tpl) - if errBraces != nil { - return nil, errBraces - } - // Backup the original. - template := tpl - // Now let's parse it. - defaultPattern := "[^/]+" - if matchQuery { - defaultPattern = "[^?&]*" - } else if matchHost { - defaultPattern = "[^.]+" - matchPrefix = false - } - // Only match strict slash if not matching - if matchPrefix || matchHost || matchQuery { - strictSlash = false - } - // Set a flag for strictSlash. - endSlash := false - if strictSlash && strings.HasSuffix(tpl, "/") { - tpl = tpl[:len(tpl)-1] - endSlash = true - } - varsN := make([]string, len(idxs)/2) - varsR := make([]*regexp.Regexp, len(idxs)/2) - pattern := bytes.NewBufferString("") - pattern.WriteByte('^') - reverse := bytes.NewBufferString("") - var end int - var err error - for i := 0; i < len(idxs); i += 2 { - // Set all values we are interested in. - raw := tpl[end:idxs[i]] - end = idxs[i+1] - parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) - name := parts[0] - patt := defaultPattern - if len(parts) == 2 { - patt = parts[1] - } - // Name or pattern can't be empty. - if name == "" || patt == "" { - return nil, fmt.Errorf("mux: missing name or pattern in %q", - tpl[idxs[i]:end]) - } - // Build the regexp pattern. - fmt.Fprintf(pattern, "%s(%s)", regexp.QuoteMeta(raw), patt) - // Build the reverse template. - fmt.Fprintf(reverse, "%s%%s", raw) - - // Append variable name and compiled pattern. - varsN[i/2] = name - varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) - if err != nil { - return nil, err - } - } - // Add the remaining. - raw := tpl[end:] - pattern.WriteString(regexp.QuoteMeta(raw)) - if strictSlash { - pattern.WriteString("[/]?") - } - if matchQuery { - // Add the default pattern if the query value is empty - if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { - pattern.WriteString(defaultPattern) - } - } - if !matchPrefix { - pattern.WriteByte('$') - } - reverse.WriteString(raw) - if endSlash { - reverse.WriteByte('/') - } - // Compile full regexp. - reg, errCompile := regexp.Compile(pattern.String()) - if errCompile != nil { - return nil, errCompile - } - // Done! - return &routeRegexp{ - template: template, - matchHost: matchHost, - matchQuery: matchQuery, - strictSlash: strictSlash, - regexp: reg, - reverse: reverse.String(), - varsN: varsN, - varsR: varsR, - }, nil -} - -// routeRegexp stores a regexp to match a host or path and information to -// collect and validate route variables. -type routeRegexp struct { - // The unmodified template. - template string - // True for host match, false for path or query string match. 
- matchHost bool - // True for query string match, false for path and host match. - matchQuery bool - // The strictSlash value defined on the route, but disabled if PathPrefix was used. - strictSlash bool - // Expanded regexp. - regexp *regexp.Regexp - // Reverse template. - reverse string - // Variable names. - varsN []string - // Variable regexps (validators). - varsR []*regexp.Regexp -} - -// Match matches the regexp against the URL host or path. -func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { - if !r.matchHost { - if r.matchQuery { - return r.matchQueryString(req) - } else { - return r.regexp.MatchString(req.URL.Path) - } - } - return r.regexp.MatchString(getHost(req)) -} - -// url builds a URL part using the given values. -func (r *routeRegexp) url(values map[string]string) (string, error) { - urlValues := make([]interface{}, len(r.varsN)) - for k, v := range r.varsN { - value, ok := values[v] - if !ok { - return "", fmt.Errorf("mux: missing route variable %q", v) - } - urlValues[k] = value - } - rv := fmt.Sprintf(r.reverse, urlValues...) - if !r.regexp.MatchString(rv) { - // The URL is checked against the full regexp, instead of checking - // individual variables. This is faster but to provide a good error - // message, we check individual regexps if the URL doesn't match. - for k, v := range r.varsN { - if !r.varsR[k].MatchString(values[v]) { - return "", fmt.Errorf( - "mux: variable %q doesn't match, expected %q", values[v], - r.varsR[k].String()) - } - } - } - return rv, nil -} - -// getUrlQuery returns a single query parameter from a request URL. -// For a URL with foo=bar&baz=ding, we return only the relevant key -// value pair for the routeRegexp. -func (r *routeRegexp) getUrlQuery(req *http.Request) string { - if !r.matchQuery { - return "" - } - templateKey := strings.SplitN(r.template, "=", 2)[0] - for key, vals := range req.URL.Query() { - if key == templateKey && len(vals) > 0 { - return key + "=" + vals[0] - } - } - return "" -} - -func (r *routeRegexp) matchQueryString(req *http.Request) bool { - return r.regexp.MatchString(r.getUrlQuery(req)) -} - -// braceIndices returns the first level curly brace indices from a string. -// It returns an error in case of unbalanced braces. -func braceIndices(s string) ([]int, error) { - var level, idx int - idxs := make([]int, 0) - for i := 0; i < len(s); i++ { - switch s[i] { - case '{': - if level++; level == 1 { - idx = i - } - case '}': - if level--; level == 0 { - idxs = append(idxs, idx, i+1) - } else if level < 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - } - } - if level != 0 { - return nil, fmt.Errorf("mux: unbalanced braces in %q", s) - } - return idxs, nil -} - -// ---------------------------------------------------------------------------- -// routeRegexpGroup -// ---------------------------------------------------------------------------- - -// routeRegexpGroup groups the route matchers that carry variables. -type routeRegexpGroup struct { - host *routeRegexp - path *routeRegexp - queries []*routeRegexp -} - -// setMatch extracts the variables from the URL once a route matches. -func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { - // Store host variables. - if v.host != nil { - hostVars := v.host.regexp.FindStringSubmatch(getHost(req)) - if hostVars != nil { - for k, v := range v.host.varsN { - m.Vars[v] = hostVars[k+1] - } - } - } - // Store path variables. 
- if v.path != nil { - pathVars := v.path.regexp.FindStringSubmatch(req.URL.Path) - if pathVars != nil { - for k, v := range v.path.varsN { - m.Vars[v] = pathVars[k+1] - } - // Check if we should redirect. - if v.path.strictSlash { - p1 := strings.HasSuffix(req.URL.Path, "/") - p2 := strings.HasSuffix(v.path.template, "/") - if p1 != p2 { - u, _ := url.Parse(req.URL.String()) - if p1 { - u.Path = u.Path[:len(u.Path)-1] - } else { - u.Path += "/" - } - m.Handler = http.RedirectHandler(u.String(), 301) - } - } - } - } - // Store query string variables. - for _, q := range v.queries { - queryVars := q.regexp.FindStringSubmatch(q.getUrlQuery(req)) - if queryVars != nil { - for k, v := range q.varsN { - m.Vars[v] = queryVars[k+1] - } - } - } -} - -// getHost tries its best to return the request host. -func getHost(r *http.Request) string { - if r.URL.IsAbs() { - return r.URL.Host - } - host := r.Host - // Slice off any port information. - if i := strings.Index(host, ":"); i != -1 { - host = host[:i] - } - return host - -} diff --git a/Godeps/_workspace/src/github.com/gorilla/mux/route.go b/Godeps/_workspace/src/github.com/gorilla/mux/route.go deleted file mode 100644 index 75481b579..000000000 --- a/Godeps/_workspace/src/github.com/gorilla/mux/route.go +++ /dev/null @@ -1,603 +0,0 @@ -// Copyright 2012 The Gorilla Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mux - -import ( - "errors" - "fmt" - "net/http" - "net/url" - "regexp" - "strings" -) - -// Route stores information to match a request and build URLs. -type Route struct { - // Parent where the route was registered (a Router). - parent parentRoute - // Request handler for the route. - handler http.Handler - // List of matchers. - matchers []matcher - // Manager for the variables from host and path. - regexp *routeRegexpGroup - // If true, when the path pattern is "/path/", accessing "/path" will - // redirect to the former and vice versa. - strictSlash bool - // If true, this route never matches: it is only used to build URLs. - buildOnly bool - // The name used to build URLs. - name string - // Error resulted from building a route. - err error - - buildVarsFunc BuildVarsFunc -} - -// Match matches the route against the request. -func (r *Route) Match(req *http.Request, match *RouteMatch) bool { - if r.buildOnly || r.err != nil { - return false - } - // Match everything. - for _, m := range r.matchers { - if matched := m.Match(req, match); !matched { - return false - } - } - // Yay, we have a match. Let's collect some info about it. - if match.Route == nil { - match.Route = r - } - if match.Handler == nil { - match.Handler = r.handler - } - if match.Vars == nil { - match.Vars = make(map[string]string) - } - // Set variables. - if r.regexp != nil { - r.regexp.setMatch(req, match, r) - } - return true -} - -// ---------------------------------------------------------------------------- -// Route attributes -// ---------------------------------------------------------------------------- - -// GetError returns an error resulted from building the route, if any. -func (r *Route) GetError() error { - return r.err -} - -// BuildOnly sets the route to never match: it is only used to build URLs. -func (r *Route) BuildOnly() *Route { - r.buildOnly = true - return r -} - -// Handler -------------------------------------------------------------------- - -// Handler sets a handler for the route. 
-func (r *Route) Handler(handler http.Handler) *Route { - if r.err == nil { - r.handler = handler - } - return r -} - -// HandlerFunc sets a handler function for the route. -func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { - return r.Handler(http.HandlerFunc(f)) -} - -// GetHandler returns the handler for the route, if any. -func (r *Route) GetHandler() http.Handler { - return r.handler -} - -// Name ----------------------------------------------------------------------- - -// Name sets the name for the route, used to build URLs. -// If the name was registered already it will be overwritten. -func (r *Route) Name(name string) *Route { - if r.name != "" { - r.err = fmt.Errorf("mux: route already has name %q, can't set %q", - r.name, name) - } - if r.err == nil { - r.name = name - r.getNamedRoutes()[name] = r - } - return r -} - -// GetName returns the name for the route, if any. -func (r *Route) GetName() string { - return r.name -} - -// ---------------------------------------------------------------------------- -// Matchers -// ---------------------------------------------------------------------------- - -// matcher types try to match a request. -type matcher interface { - Match(*http.Request, *RouteMatch) bool -} - -// addMatcher adds a matcher to the route. -func (r *Route) addMatcher(m matcher) *Route { - if r.err == nil { - r.matchers = append(r.matchers, m) - } - return r -} - -// addRegexpMatcher adds a host or path matcher and builder to a route. -func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error { - if r.err != nil { - return r.err - } - r.regexp = r.getRegexpGroup() - if !matchHost && !matchQuery { - if len(tpl) == 0 || tpl[0] != '/' { - return fmt.Errorf("mux: path must start with a slash, got %q", tpl) - } - if r.regexp.path != nil { - tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl - } - } - rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash) - if err != nil { - return err - } - for _, q := range r.regexp.queries { - if err = uniqueVars(rr.varsN, q.varsN); err != nil { - return err - } - } - if matchHost { - if r.regexp.path != nil { - if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { - return err - } - } - r.regexp.host = rr - } else { - if r.regexp.host != nil { - if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { - return err - } - } - if matchQuery { - r.regexp.queries = append(r.regexp.queries, rr) - } else { - r.regexp.path = rr - } - } - r.addMatcher(rr) - return nil -} - -// Headers -------------------------------------------------------------------- - -// headerMatcher matches the request against header values. -type headerMatcher map[string]string - -func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithString(m, r.Header, true) -} - -// Headers adds a matcher for request header values. -// It accepts a sequence of key/value pairs to be matched. For example: -// -// r := mux.NewRouter() -// r.Headers("Content-Type", "application/json", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if both request header values match. -// Alternatively, you can provide a regular expression and match the header as follows: -// -// r.Headers("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will the same as the previous example, with the addition of matching -// application/text as well. 
-// -// If the value is an empty string, it will match any value if the key is set. -func (r *Route) Headers(pairs ...string) *Route { - if r.err == nil { - var headers map[string]string - headers, r.err = mapFromPairsToString(pairs...) - return r.addMatcher(headerMatcher(headers)) - } - return r -} - -// headerRegexMatcher matches the request against the route given a regex for the header -type headerRegexMatcher map[string]*regexp.Regexp - -func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchMapWithRegex(m, r.Header, true) -} - -// Regular expressions can be used with headers as well. -// It accepts a sequence of key/value pairs, where the value has regex support. For example: -// -// r := mux.NewRouter() -// r.HeadersRegexp("Content-Type", "application/(text|json)", -// "X-Requested-With", "XMLHttpRequest") -// -// The above route will only match if the request header matches both regular expressions. -// If the value is an empty string, it will match any value if the key is set. -func (r *Route) HeadersRegexp(pairs ...string) *Route { - if r.err == nil { - var headers map[string]*regexp.Regexp - headers, r.err = mapFromPairsToRegex(pairs...) - return r.addMatcher(headerRegexMatcher(headers)) - } - return r -} - -// Host ----------------------------------------------------------------------- - -// Host adds a matcher for the URL host. -// It accepts a template with zero or more URL variables enclosed by {}. -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next dot. -// -// - {name:pattern} matches the given regexp pattern. -// -// For example: -// -// r := mux.NewRouter() -// r.Host("www.domain.com") -// r.Host("{subdomain}.domain.com") -// r.Host("{subdomain:[a-z]+}.domain.com") -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Host(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, true, false, false) - return r -} - -// MatcherFunc ---------------------------------------------------------------- - -// MatcherFunc is the function signature used by custom matchers. -type MatcherFunc func(*http.Request, *RouteMatch) bool - -func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { - return m(r, match) -} - -// MatcherFunc adds a custom function to be used as request matcher. -func (r *Route) MatcherFunc(f MatcherFunc) *Route { - return r.addMatcher(f) -} - -// Methods -------------------------------------------------------------------- - -// methodMatcher matches the request against HTTP methods. -type methodMatcher []string - -func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.Method) -} - -// Methods adds a matcher for HTTP methods. -// It accepts a sequence of one or more methods to be matched, e.g.: -// "GET", "POST", "PUT". -func (r *Route) Methods(methods ...string) *Route { - for k, v := range methods { - methods[k] = strings.ToUpper(v) - } - return r.addMatcher(methodMatcher(methods)) -} - -// Path ----------------------------------------------------------------------- - -// Path adds a matcher for the URL path. -// It accepts a template with zero or more URL variables enclosed by {}. The -// template must start with a "/". -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern.
-// -// For example: -// -// r := mux.NewRouter() -// r.Path("/products/").Handler(ProductsHandler) -// r.Path("/products/{key}").Handler(ProductsHandler) -// r.Path("/articles/{category}/{id:[0-9]+}"). -// Handler(ArticleHandler) -// -// Variable names must be unique in a given route. They can be retrieved -// calling mux.Vars(request). -func (r *Route) Path(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, false, false) - return r -} - -// PathPrefix ----------------------------------------------------------------- - -// PathPrefix adds a matcher for the URL path prefix. This matches if the given -// template is a prefix of the full URL path. See Route.Path() for details on -// the tpl argument. -// -// Note that it does not treat slashes specially ("/foobar/" will be matched by -// the prefix "/foo") so you may want to use a trailing slash here. -// -// Also note that the setting of Router.StrictSlash() has no effect on routes -// with a PathPrefix matcher. -func (r *Route) PathPrefix(tpl string) *Route { - r.err = r.addRegexpMatcher(tpl, false, true, false) - return r -} - -// Query ---------------------------------------------------------------------- - -// Queries adds a matcher for URL query values. -// It accepts a sequence of key/value pairs. Values may define variables. -// For example: -// -// r := mux.NewRouter() -// r.Queries("foo", "bar", "id", "{id:[0-9]+}") -// -// The above route will only match if the URL contains the defined query -// values, e.g.: ?foo=bar&id=42. -// -// If the value is an empty string, it will match any value if the key is set. -// -// Variables can define an optional regexp pattern to be matched: -// -// - {name} matches anything until the next slash. -// -// - {name:pattern} matches the given regexp pattern. -func (r *Route) Queries(pairs ...string) *Route { - length := len(pairs) - if length%2 != 0 { - r.err = fmt.Errorf( - "mux: number of parameters must be multiple of 2, got %v", pairs) - return nil - } - for i := 0; i < length; i += 2 { - if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil { - return r - } - } - - return r -} - -// Schemes -------------------------------------------------------------------- - -// schemeMatcher matches the request against URL schemes. -type schemeMatcher []string - -func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { - return matchInArray(m, r.URL.Scheme) -} - -// Schemes adds a matcher for URL schemes. -// It accepts a sequence of schemes to be matched, e.g.: "http", "https". -func (r *Route) Schemes(schemes ...string) *Route { - for k, v := range schemes { - schemes[k] = strings.ToLower(v) - } - return r.addMatcher(schemeMatcher(schemes)) -} - -// BuildVarsFunc -------------------------------------------------------------- - -// BuildVarsFunc is the function signature used by custom build variable -// functions (which can modify route variables before a route's URL is built). -type BuildVarsFunc func(map[string]string) map[string]string - -// BuildVarsFunc adds a custom function to be used to modify build variables -// before a route's URL is built. -func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { - r.buildVarsFunc = f - return r -} - -// Subrouter ------------------------------------------------------------------ - -// Subrouter creates a subrouter for the route. -// -// It will test the inner routes only if the parent route matched.
 For example: -// -// r := mux.NewRouter() -// s := r.Host("www.domain.com").Subrouter() -// s.HandleFunc("/products/", ProductsHandler) -// s.HandleFunc("/products/{key}", ProductHandler) -// s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) -// -// Here, the routes registered in the subrouter won't be tested if the host -// doesn't match. -func (r *Route) Subrouter() *Router { - router := &Router{parent: r, strictSlash: r.strictSlash} - r.addMatcher(router) - return router -} - -// ---------------------------------------------------------------------------- -// URL building -// ---------------------------------------------------------------------------- - -// URL builds a URL for the route. -// -// It accepts a sequence of key/value pairs for the route variables. For -// example, given this route: -// -// r := mux.NewRouter() -// r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// ...a URL for it can be built using: -// -// url, err := r.Get("article").URL("category", "technology", "id", "42") -// -// ...which will return a url.URL with the following path: -// -// "/articles/technology/42" -// -// This also works for host variables: -// -// r := mux.NewRouter() -// r.Host("{subdomain}.domain.com"). -// HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). -// Name("article") -// -// // url.String() will be "http://news.domain.com/articles/technology/42" -// url, err := r.Get("article").URL("subdomain", "news", -// "category", "technology", -// "id", "42") -// -// All variables defined in the route are required, and their values must -// conform to the corresponding patterns. -func (r *Route) URL(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil { - return nil, errors.New("mux: route doesn't have a host or path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - var scheme, host, path string - if r.regexp.host != nil { - // Set a default scheme. - scheme = "http" - if host, err = r.regexp.host.url(values); err != nil { - return nil, err - } - } - if r.regexp.path != nil { - if path, err = r.regexp.path.url(values); err != nil { - return nil, err - } - } - return &url.URL{ - Scheme: scheme, - Host: host, - Path: path, - }, nil -} - -// URLHost builds the host part of the URL for a route. See Route.URL(). -// -// The route must have a host defined. -func (r *Route) URLHost(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.host == nil { - return nil, errors.New("mux: route doesn't have a host") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - host, err := r.regexp.host.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Scheme: "http", - Host: host, - }, nil -} - -// URLPath builds the path part of the URL for a route. See Route.URL(). -// -// The route must have a path defined. -func (r *Route) URLPath(pairs ...string) (*url.URL, error) { - if r.err != nil { - return nil, r.err - } - if r.regexp == nil || r.regexp.path == nil { - return nil, errors.New("mux: route doesn't have a path") - } - values, err := r.prepareVars(pairs...) - if err != nil { - return nil, err - } - path, err := r.regexp.path.url(values) - if err != nil { - return nil, err - } - return &url.URL{ - Path: path, - }, nil -} - -// prepareVars converts the route variable pairs into a map. If the route has a -// BuildVarsFunc, it is invoked.
-func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { - m, err := mapFromPairsToString(pairs...) - if err != nil { - return nil, err - } - return r.buildVars(m), nil -} - -func (r *Route) buildVars(m map[string]string) map[string]string { - if r.parent != nil { - m = r.parent.buildVars(m) - } - if r.buildVarsFunc != nil { - m = r.buildVarsFunc(m) - } - return m -} - -// ---------------------------------------------------------------------------- -// parentRoute -// ---------------------------------------------------------------------------- - -// parentRoute allows routes to know about parent host and path definitions. -type parentRoute interface { - getNamedRoutes() map[string]*Route - getRegexpGroup() *routeRegexpGroup - buildVars(map[string]string) map[string]string -} - -// getNamedRoutes returns the map where named routes are registered. -func (r *Route) getNamedRoutes() map[string]*Route { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - return r.parent.getNamedRoutes() -} - -// getRegexpGroup returns regexp definitions from this route. -func (r *Route) getRegexpGroup() *routeRegexpGroup { - if r.regexp == nil { - if r.parent == nil { - // During tests router is not always set. - r.parent = NewRouter() - } - regexp := r.parent.getRegexpGroup() - if regexp == nil { - r.regexp = new(routeRegexpGroup) - } else { - // Copy. - r.regexp = &routeRegexpGroup{ - host: regexp.host, - path: regexp.path, - queries: regexp.queries, - } - } - } - return r.regexp -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md b/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md deleted file mode 100644 index bce2ebb51..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/README.md +++ /dev/null @@ -1,39 +0,0 @@ -Consul API client -================= - -This package provides the `api` package which attempts to -provide programmatic access to the full Consul API. - -Currently, all of the Consul APIs included in version 0.3 are supported. 
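As a minimal sketch of client construction (assuming only the `Config`, `DefaultConfig` and `NewClient` definitions that appear later in this patch; the agent address below is hypothetical), complementing the fuller KV example under Usage:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Start from the defaults (127.0.0.1:8500 over http) and override
	// only the fields we care about.
	conf := api.DefaultConfig()
	conf.Address = "consul.service.internal:8500" // hypothetical address
	conf.Datacenter = "dc1"

	client, err := api.NewClient(conf)
	if err != nil {
		log.Fatal(err)
	}

	// Endpoint handles hang off the client, e.g. the agent endpoints.
	name, err := client.Agent().NodeName()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("connected to node:", name)
}
```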
- -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api). - -Usage -===== - -Below is an example of using the Consul client: - -```go -// Get a new client, with KV endpoints -client, _ := api.NewClient(api.DefaultConfig()) -kv := client.KV() - -// PUT a new KV pair -p := &api.KVPair{Key: "foo", Value: []byte("test")} -_, err := kv.Put(p, nil) -if err != nil { - panic(err) -} - -// Lookup the pair -pair, _, err := kv.Get("foo", nil) -if err != nil { - panic(err) -} -fmt.Printf("KV: %v", pair) - -``` - diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go deleted file mode 100644 index c3fb0d53a..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/acl.go +++ /dev/null @@ -1,140 +0,0 @@ -package api - -const ( - // ACLClientType is the client type token - ACLClientType = "client" - - // ACLManagementType is the management type token - ACLManagementType = "management" -) - -// ACLEntry is used to represent an ACL entry -type ACLEntry struct { - CreateIndex uint64 - ModifyIndex uint64 - ID string - Name string - Type string - Rules string -} - -// ACL can be used to query the ACL endpoints -type ACL struct { - c *Client -} - -// ACL returns a handle to the ACL endpoints -func (c *Client) ACL() *ACL { - return &ACL{c} -} - -// Create is used to generate a new token with the given parameters -func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/create") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Update is used to update the rules of an existing token -func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/update") - r.setWriteOptions(q) - r.obj = acl - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Destroy is used to destroy a given ACL token ID -func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - return wm, nil -} - -// Clone is used to return a new token cloned from an existing one -func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) { - r := a.c.newRequest("PUT", "/v1/acl/clone/"+id) - r.setWriteOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out struct{ ID string } - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Info is used to query for information about an ACL token -func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/info/"+id) - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - -
qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to get all the ACL tokens -func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) { - r := a.c.newRequest("GET", "/v1/acl/list") - r.setQueryOptions(q) - rtt, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*ACLEntry - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go deleted file mode 100644 index 2b950d0a3..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/agent.go +++ /dev/null @@ -1,335 +0,0 @@ -package api - -import ( - "fmt" -) - -// AgentCheck represents a check known to the agent -type AgentCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string -} - -// AgentService represents a service known to the agent -type AgentService struct { - ID string - Service string - Tags []string - Port int - Address string -} - -// AgentMember represents a cluster member known to the agent -type AgentMember struct { - Name string - Addr string - Port uint16 - Tags map[string]string - Status int - ProtocolMin uint8 - ProtocolMax uint8 - ProtocolCur uint8 - DelegateMin uint8 - DelegateMax uint8 - DelegateCur uint8 -} - -// AgentServiceRegistration is used to register a new service -type AgentServiceRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Tags []string `json:",omitempty"` - Port int `json:",omitempty"` - Address string `json:",omitempty"` - Check *AgentServiceCheck - Checks AgentServiceChecks -} - -// AgentCheckRegistration is used to register a new check -type AgentCheckRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Notes string `json:",omitempty"` - ServiceID string `json:",omitempty"` - AgentServiceCheck -} - -// AgentServiceCheck is used to create an associated -// check for a service -type AgentServiceCheck struct { - Script string `json:",omitempty"` - Interval string `json:",omitempty"` - Timeout string `json:",omitempty"` - TTL string `json:",omitempty"` - HTTP string `json:",omitempty"` - TCP string `json:",omitempty"` - Status string `json:",omitempty"` -} -type AgentServiceChecks []*AgentServiceCheck - -// Agent can be used to query the Agent endpoints -type Agent struct { - c *Client - - // cache the node name - nodeName string -} - -// Agent returns a handle to the agent endpoints -func (c *Client) Agent() *Agent { - return &Agent{c: c} -} - -// Self is used to query the agent we are speaking to for -// information about itself -func (a *Agent) Self() (map[string]map[string]interface{}, error) { - r := a.c.newRequest("GET", "/v1/agent/self") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]map[string]interface{} - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// NodeName is used to get the node name of the agent -func (a 
*Agent) NodeName() (string, error) { - if a.nodeName != "" { - return a.nodeName, nil - } - info, err := a.Self() - if err != nil { - return "", err - } - name := info["Config"]["NodeName"].(string) - a.nodeName = name - return name, nil -} - -// Checks returns the locally registered checks -func (a *Agent) Checks() (map[string]*AgentCheck, error) { - r := a.c.newRequest("GET", "/v1/agent/checks") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentCheck - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Services returns the locally registered services -func (a *Agent) Services() (map[string]*AgentService, error) { - r := a.c.newRequest("GET", "/v1/agent/services") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out map[string]*AgentService - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Members returns the known gossip members. The WAN -// flag can be used to query a server for WAN members. -func (a *Agent) Members(wan bool) ([]*AgentMember, error) { - r := a.c.newRequest("GET", "/v1/agent/members") - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []*AgentMember - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// ServiceRegister is used to register a new service with -// the local agent -func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/service/register") - r.obj = service - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ServiceDeregister is used to deregister a service with -// the local agent -func (a *Agent) ServiceDeregister(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// PassTTL is used to set a TTL check to the passing state -func (a *Agent) PassTTL(checkID, note string) error { - return a.UpdateTTL(checkID, note, "pass") -} - -// WarnTTL is used to set a TTL check to the warning state -func (a *Agent) WarnTTL(checkID, note string) error { - return a.UpdateTTL(checkID, note, "warn") -} - -// FailTTL is used to set a TTL check to the failing state -func (a *Agent) FailTTL(checkID, note string) error { - return a.UpdateTTL(checkID, note, "fail") -} - -// UpdateTTL is used to update the TTL of a check -func (a *Agent) UpdateTTL(checkID, note, status string) error { - switch status { - case "pass": - case "warn": - case "fail": - default: - return fmt.Errorf("Invalid status: %s", status) - } - endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID) - r := a.c.newRequest("PUT", endpoint) - r.params.Set("note", note) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// CheckRegister is used to register a new check with -// the local agent -func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { - r := a.c.newRequest("PUT", "/v1/agent/check/register") - r.obj = check - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil 
-} - -// CheckDeregister is used to deregister a check with -// the local agent -func (a *Agent) CheckDeregister(checkID string) error { - r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// Join is used to instruct the agent to attempt a join to -// another cluster member -func (a *Agent) Join(addr string, wan bool) error { - r := a.c.newRequest("PUT", "/v1/agent/join/"+addr) - if wan { - r.params.Set("wan", "1") - } - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// ForceLeave is used to have the agent eject a failed node -func (a *Agent) ForceLeave(node string) error { - r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableServiceMaintenance toggles service maintenance mode on -// for the given service ID. -func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableServiceMaintenance toggles service maintenance mode off -// for the given service ID. -func (a *Agent) DisableServiceMaintenance(serviceID string) error { - r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID) - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// EnableNodeMaintenance toggles node maintenance mode on for the -// agent we are connected to. -func (a *Agent) EnableNodeMaintenance(reason string) error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "true") - r.params.Set("reason", reason) - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} - -// DisableNodeMaintenance toggles node maintenance mode off for the -// agent we are connected to. -func (a *Agent) DisableNodeMaintenance() error { - r := a.c.newRequest("PUT", "/v1/agent/maintenance") - r.params.Set("enable", "false") - _, resp, err := requireOK(a.c.doRequest(r)) - if err != nil { - return err - } - resp.Body.Close() - return nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go deleted file mode 100644 index b0b712834..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/api.go +++ /dev/null @@ -1,442 +0,0 @@ -package api - -import ( - "bytes" - "crypto/tls" - "encoding/json" - "fmt" - "io" - "log" - "net" - "net/http" - "net/url" - "os" - "strconv" - "strings" - "time" -) - -// QueryOptions are used to parameterize a query -type QueryOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // AllowStale allows any Consul server (non-leader) to service - // a read. This allows for lower latency and higher throughput - AllowStale bool - - // RequireConsistent forces the read to be fully consistent. - // This is more expensive but prevents ever performing a stale - // read. - RequireConsistent bool - - // WaitIndex is used to enable a blocking query. 
Waits - // until the timeout or the next index is reached - WaitIndex uint64 - - // WaitTime is used to bound the duration of a wait. - // Defaults to that of the Config, but can be overridden. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string -} - -// WriteOptions are used to parameterize a write -type WriteOptions struct { - // Providing a datacenter overwrites the DC provided - // by the Config - Datacenter string - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. - Token string -} - -// QueryMeta is used to return meta data about a query -type QueryMeta struct { - // LastIndex. This can be used as a WaitIndex to perform - // a blocking query - LastIndex uint64 - - // Time of last contact from the leader for the - // server servicing the request - LastContact time.Duration - - // Is there a known leader - KnownLeader bool - - // How long did the request take - RequestTime time.Duration -} - -// WriteMeta is used to return meta data about a write -type WriteMeta struct { - // How long did the request take - RequestTime time.Duration -} - -// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication -type HttpBasicAuth struct { - // Username to use for HTTP Basic Authentication - Username string - - // Password to use for HTTP Basic Authentication - Password string -} - -// Config is used to configure the creation of a client -type Config struct { - // Address is the address of the Consul server - Address string - - // Scheme is the URI scheme for the Consul server - Scheme string - - // Datacenter to use. If not provided, the default agent datacenter is used. - Datacenter string - - // HttpClient is the client to use. Default will be - // used if not provided. - HttpClient *http.Client - - // HttpAuth is the auth info to use for http access. - HttpAuth *HttpBasicAuth - - // WaitTime limits how long a Watch will block. If not provided, - // the agent default values will be used. - WaitTime time.Duration - - // Token is used to provide a per-request ACL token - // which overrides the agent's default token. 
- Token string -} - -// DefaultConfig returns a default configuration for the client -func DefaultConfig() *Config { - config := &Config{ - Address: "127.0.0.1:8500", - Scheme: "http", - HttpClient: http.DefaultClient, - } - - if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" { - config.Address = addr - } - - if token := os.Getenv("CONSUL_HTTP_TOKEN"); token != "" { - config.Token = token - } - - if auth := os.Getenv("CONSUL_HTTP_AUTH"); auth != "" { - var username, password string - if strings.Contains(auth, ":") { - split := strings.SplitN(auth, ":", 2) - username = split[0] - password = split[1] - } else { - username = auth - } - - config.HttpAuth = &HttpBasicAuth{ - Username: username, - Password: password, - } - } - - if ssl := os.Getenv("CONSUL_HTTP_SSL"); ssl != "" { - enabled, err := strconv.ParseBool(ssl) - if err != nil { - log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL: %s", err) - } - - if enabled { - config.Scheme = "https" - } - } - - if verify := os.Getenv("CONSUL_HTTP_SSL_VERIFY"); verify != "" { - doVerify, err := strconv.ParseBool(verify) - if err != nil { - log.Printf("[WARN] client: could not parse CONSUL_HTTP_SSL_VERIFY: %s", err) - } - - if !doVerify { - config.HttpClient.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - } - } - } - - return config -} - -// Client provides a client to the Consul API -type Client struct { - config Config -} - -// NewClient returns a new client -func NewClient(config *Config) (*Client, error) { - // bootstrap the config - defConfig := DefaultConfig() - - if len(config.Address) == 0 { - config.Address = defConfig.Address - } - - if len(config.Scheme) == 0 { - config.Scheme = defConfig.Scheme - } - - if config.HttpClient == nil { - config.HttpClient = defConfig.HttpClient - } - - if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 { - config.HttpClient = &http.Client{ - Transport: &http.Transport{ - Dial: func(_, _ string) (net.Conn, error) { - return net.Dial("unix", parts[1]) - }, - }, - } - config.Address = parts[1] - } - - client := &Client{ - config: *config, - } - return client, nil -} - -// request is used to help build up a request -type request struct { - config *Config - method string - url *url.URL - params url.Values - body io.Reader - obj interface{} -} - -// setQueryOptions is used to annotate the request with -// additional query options -func (r *request) setQueryOptions(q *QueryOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.AllowStale { - r.params.Set("stale", "") - } - if q.RequireConsistent { - r.params.Set("consistent", "") - } - if q.WaitIndex != 0 { - r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10)) - } - if q.WaitTime != 0 { - r.params.Set("wait", durToMsec(q.WaitTime)) - } - if q.Token != "" { - r.params.Set("token", q.Token) - } -} - -// durToMsec converts a duration to a millisecond specified string -func durToMsec(dur time.Duration) string { - return fmt.Sprintf("%dms", dur/time.Millisecond) -} - -// setWriteOptions is used to annotate the request with -// additional write options -func (r *request) setWriteOptions(q *WriteOptions) { - if q == nil { - return - } - if q.Datacenter != "" { - r.params.Set("dc", q.Datacenter) - } - if q.Token != "" { - r.params.Set("token", q.Token) - } -} - -// toHTTP converts the request to an HTTP request -func (r *request) toHTTP() (*http.Request, error) { - // Encode the query parameters - r.url.RawQuery = 
r.params.Encode() - - // Check if we should encode the body - if r.body == nil && r.obj != nil { - if b, err := encodeBody(r.obj); err != nil { - return nil, err - } else { - r.body = b - } - } - - // Create the HTTP request - req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body) - if err != nil { - return nil, err - } - - req.URL.Host = r.url.Host - req.URL.Scheme = r.url.Scheme - req.Host = r.url.Host - - // Setup auth - if r.config.HttpAuth != nil { - req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password) - } - - return req, nil -} - -// newRequest is used to create a new request -func (c *Client) newRequest(method, path string) *request { - r := &request{ - config: &c.config, - method: method, - url: &url.URL{ - Scheme: c.config.Scheme, - Host: c.config.Address, - Path: path, - }, - params: make(map[string][]string), - } - if c.config.Datacenter != "" { - r.params.Set("dc", c.config.Datacenter) - } - if c.config.WaitTime != 0 { - r.params.Set("wait", durToMsec(r.config.WaitTime)) - } - if c.config.Token != "" { - r.params.Set("token", r.config.Token) - } - return r -} - -// doRequest runs a request with our client -func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) { - req, err := r.toHTTP() - if err != nil { - return 0, nil, err - } - start := time.Now() - resp, err := c.config.HttpClient.Do(req) - diff := time.Now().Sub(start) - return diff, resp, err -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - r := c.newRequest("GET", endpoint) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if err := decodeBody(resp, out); err != nil { - return nil, err - } - return qm, nil -} - -// write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. 
-func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - r := c.newRequest("PUT", endpoint) - r.setWriteOptions(q) - r.obj = in - rtt, resp, err := requireOK(c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - if out != nil { - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - } - return wm, nil -} - -// parseQueryMeta is used to help parse query meta-data -func parseQueryMeta(resp *http.Response, q *QueryMeta) error { - header := resp.Header - - // Parse the X-Consul-Index - index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-Index: %v", err) - } - q.LastIndex = index - - // Parse the X-Consul-LastContact - last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64) - if err != nil { - return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err) - } - q.LastContact = time.Duration(last) * time.Millisecond - - // Parse the X-Consul-KnownLeader - switch header.Get("X-Consul-KnownLeader") { - case "true": - q.KnownLeader = true - default: - q.KnownLeader = false - } - return nil -} - -// decodeBody is used to JSON decode a body -func decodeBody(resp *http.Response, out interface{}) error { - dec := json.NewDecoder(resp.Body) - return dec.Decode(out) -} - -// encodeBody is used to encode a request body -func encodeBody(obj interface{}) (io.Reader, error) { - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - if err := enc.Encode(obj); err != nil { - return nil, err - } - return buf, nil -} - -// requireOK is used to wrap doRequest and check for a 200 -func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) { - if e != nil { - if resp != nil { - resp.Body.Close() - } - return d, nil, e - } - if resp.StatusCode != 200 { - var buf bytes.Buffer - io.Copy(&buf, resp.Body) - resp.Body.Close() - return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes()) - } - return d, resp, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go deleted file mode 100644 index cf64bd909..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/catalog.go +++ /dev/null @@ -1,182 +0,0 @@ -package api - -type Node struct { - Node string - Address string -} - -type CatalogService struct { - Node string - Address string - ServiceID string - ServiceName string - ServiceAddress string - ServiceTags []string - ServicePort int -} - -type CatalogNode struct { - Node *Node - Services map[string]*AgentService -} - -type CatalogRegistration struct { - Node string - Address string - Datacenter string - Service *AgentService - Check *AgentCheck -} - -type CatalogDeregistration struct { - Node string - Address string - Datacenter string - ServiceID string - CheckID string -} - -// Catalog can be used to query the Catalog endpoints -type Catalog struct { - c *Client -} - -// Catalog returns a handle to the catalog endpoints -func (c *Client) Catalog() *Catalog { - return &Catalog{c} -} - -func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/register") - r.setWriteOptions(q) - r.obj = reg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - 
wm.RequestTime = rtt - - return wm, nil -} - -func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) { - r := c.c.newRequest("PUT", "/v1/catalog/deregister") - r.setWriteOptions(q) - r.obj = dereg - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - resp.Body.Close() - - wm := &WriteMeta{} - wm.RequestTime = rtt - - return wm, nil -} - -// Datacenters is used to query for all the known datacenters -func (c *Catalog) Datacenters() ([]string, error) { - r := c.c.newRequest("GET", "/v1/catalog/datacenters") - _, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var out []string - if err := decodeBody(resp, &out); err != nil { - return nil, err - } - return out, nil -} - -// Nodes is used to query all the known nodes -func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/nodes") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*Node - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Services is used to query for all known services -func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/services") - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out map[string][]string - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query catalog entries for a given service -func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/service/"+service) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*CatalogService - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Node is used to query for service information about a single node -func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) { - r := c.c.newRequest("GET", "/v1/catalog/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := requireOK(c.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out *CatalogNode - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go deleted file mode 100644 index 85b5b069b..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/event.go +++ /dev/null @@ -1,104 +0,0 @@ -package api - -import ( - "bytes" - "strconv" -) - -// Event can be used to query the Event endpoints -type Event struct { - c *Client -} - -// UserEvent represents an event that was fired by the user -type 
 UserEvent struct { - ID string - Name string - Payload []byte - NodeFilter string - ServiceFilter string - TagFilter string - Version int - LTime uint64 -} - -// Event returns a handle to the event endpoints -func (c *Client) Event() *Event { - return &Event{c} -} - -// Fire is used to fire a new user event. Only the Name, Payload and Filters -// are respected. This returns the ID or an associated error. Cross DC requests -// are supported. -func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) { - r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name) - r.setWriteOptions(q) - if params.NodeFilter != "" { - r.params.Set("node", params.NodeFilter) - } - if params.ServiceFilter != "" { - r.params.Set("service", params.ServiceFilter) - } - if params.TagFilter != "" { - r.params.Set("tag", params.TagFilter) - } - if params.Payload != nil { - r.body = bytes.NewReader(params.Payload) - } - - rtt, resp, err := requireOK(e.c.doRequest(r)) - if err != nil { - return "", nil, err - } - defer resp.Body.Close() - - wm := &WriteMeta{RequestTime: rtt} - var out UserEvent - if err := decodeBody(resp, &out); err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// List is used to get the most recent events an agent has received. -// This list can be optionally filtered by the name. This endpoint supports -// quasi-blocking queries. The index is not monotonic, nor does it provide -// LastContact or KnownLeader. -func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) { - r := e.c.newRequest("GET", "/v1/event/list") - r.setQueryOptions(q) - if name != "" { - r.params.Set("name", name) - } - rtt, resp, err := requireOK(e.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var entries []*UserEvent - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// IDToIndex is a bit of a hack. This simulates the index generation to -// convert an event ID into a WaitIndex.
-func (e *Event) IDToIndex(uuid string) uint64 { - lower := uuid[0:8] + uuid[9:13] + uuid[14:18] - upper := uuid[19:23] + uuid[24:36] - lowVal, err := strconv.ParseUint(lower, 16, 64) - if err != nil { - panic("Failed to convert " + lower) - } - highVal, err := strconv.ParseUint(upper, 16, 64) - if err != nil { - panic("Failed to convert " + upper) - } - return lowVal ^ highVal -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go deleted file mode 100644 index 1a273e087..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/health.go +++ /dev/null @@ -1,136 +0,0 @@ -package api - -import ( - "fmt" -) - -// HealthCheck is used to represent a single check -type HealthCheck struct { - Node string - CheckID string - Name string - Status string - Notes string - Output string - ServiceID string - ServiceName string -} - -// ServiceEntry is used for the health service endpoint -type ServiceEntry struct { - Node *Node - Service *AgentService - Checks []*HealthCheck -} - -// Health can be used to query the Health endpoints -type Health struct { - c *Client -} - -// Health returns a handle to the health endpoints -func (c *Client) Health() *Health { - return &Health{c} -} - -// Node is used to query for checks belonging to a given node -func (h *Health) Node(node string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/node/"+node) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*HealthCheck - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Checks is used to return the checks associated with a service -func (h *Health) Checks(service string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/checks/"+service) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*HealthCheck - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// Service is used to query health information along with service info -// for a given service. It can optionally do server-side filtering on a tag -// or nodes with passing health checks only. -func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) { - r := h.c.newRequest("GET", "/v1/health/service/"+service) - r.setQueryOptions(q) - if tag != "" { - r.params.Set("tag", tag) - } - if passingOnly { - r.params.Set("passing", "1") - } - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*ServiceEntry - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} - -// State is used to retrieve all the checks in a given state. -// The wildcard "any" state can also be used for all checks. 
-func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { - switch state { - case "any": - case "warning": - case "critical": - case "passing": - case "unknown": - default: - return nil, nil, fmt.Errorf("Unsupported state: %v", state) - } - r := h.c.newRequest("GET", "/v1/health/state/"+state) - r.setQueryOptions(q) - rtt, resp, err := requireOK(h.c.doRequest(r)) - if err != nil { - return nil, nil, err - } - defer resp.Body.Close() - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - var out []*HealthCheck - if err := decodeBody(resp, &out); err != nil { - return nil, nil, err - } - return out, qm, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go deleted file mode 100644 index 688b3a09d..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/kv.go +++ /dev/null @@ -1,240 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "io" - "net/http" - "strconv" - "strings" -) - -// KVPair is used to represent a single K/V entry -type KVPair struct { - Key string - CreateIndex uint64 - ModifyIndex uint64 - LockIndex uint64 - Flags uint64 - Value []byte - Session string -} - -// KVPairs is a list of KVPair objects -type KVPairs []*KVPair - -// KV is used to manipulate the K/V API -type KV struct { - c *Client -} - -// KV is used to return a handle to the K/V apis -func (c *Client) KV() *KV { - return &KV{c} -} - -// Get is used to lookup a single key -func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) { - resp, qm, err := k.getInternal(key, nil, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List is used to lookup all keys under a prefix -func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) { - resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []*KVPair - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// Keys is used to list all the keys under a prefix. Optionally, -// a separator can be used to limit the responses. 
-func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) { - params := map[string]string{"keys": ""} - if separator != "" { - params["separator"] = separator - } - resp, qm, err := k.getInternal(prefix, params, q) - if err != nil { - return nil, nil, err - } - if resp == nil { - return nil, qm, nil - } - defer resp.Body.Close() - - var entries []string - if err := decodeBody(resp, &entries); err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) { - r := k.c.newRequest("GET", "/v1/kv/"+key) - r.setQueryOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := k.c.doRequest(r) - if err != nil { - return nil, nil, err - } - - qm := &QueryMeta{} - parseQueryMeta(resp, qm) - qm.RequestTime = rtt - - if resp.StatusCode == 404 { - resp.Body.Close() - return nil, qm, nil - } else if resp.StatusCode != 200 { - resp.Body.Close() - return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode) - } - return resp, qm, nil -} - -// Put is used to write a new value. Only the -// Key, Flags and Value is respected. -func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) { - params := make(map[string]string, 1) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - _, wm, err := k.put(p.Key, params, p.Value, q) - return wm, err -} - -// CAS is used for a Check-And-Set operation. The Key, -// ModifyIndex, Flags and Value are respected. Returns true -// on success or false on failures. -func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["cas"] = strconv.FormatUint(p.ModifyIndex, 10) - return k.put(p.Key, params, p.Value, q) -} - -// Acquire is used for a lock acquisition operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["acquire"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -// Release is used for a lock release operation. The Key, -// Flags, Value and Session are respected. Returns true -// on success or false on failures. -func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := make(map[string]string, 2) - if p.Flags != 0 { - params["flags"] = strconv.FormatUint(p.Flags, 10) - } - params["release"] = p.Session - return k.put(p.Key, params, p.Value, q) -} - -func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) { - if len(key) > 0 && key[0] == '/' { - return false, nil, fmt.Errorf("Invalid key. 
Key must not begin with a '/': %s", key) - } - - r := k.c.newRequest("PUT", "/v1/kv/"+key) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - r.body = bytes.NewReader(body) - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(string(buf.Bytes()), "true") - return res, qm, nil -} - -// Delete is used to delete a single key -func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(key, nil, w) - return qm, err -} - -// DeleteCAS is used for a Delete Check-And-Set operation. The Key -// and ModifyIndex are respected. Returns true on success or false on failures. -func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) { - params := map[string]string{ - "cas": strconv.FormatUint(p.ModifyIndex, 10), - } - return k.deleteInternal(p.Key, params, q) -} - -// DeleteTree is used to delete all keys under a prefix -func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) { - _, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w) - return qm, err -} - -func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) { - r := k.c.newRequest("DELETE", "/v1/kv/"+key) - r.setWriteOptions(q) - for param, val := range params { - r.params.Set(param, val) - } - rtt, resp, err := requireOK(k.c.doRequest(r)) - if err != nil { - return false, nil, err - } - defer resp.Body.Close() - - qm := &WriteMeta{} - qm.RequestTime = rtt - - var buf bytes.Buffer - if _, err := io.Copy(&buf, resp.Body); err != nil { - return false, nil, fmt.Errorf("Failed to read response: %v", err) - } - res := strings.Contains(string(buf.Bytes()), "true") - return res, qm, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go deleted file mode 100644 index a76685f04..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/lock.go +++ /dev/null @@ -1,338 +0,0 @@ -package api - -import ( - "fmt" - "sync" - "time" -) - -const ( - // DefaultLockSessionName is the Session Name we assign if none is provided - DefaultLockSessionName = "Consul API Lock" - - // DefaultLockSessionTTL is the default session TTL if no Session is provided - // when creating a new Lock. This is used because we do not have another - // other check to depend upon. - DefaultLockSessionTTL = "15s" - - // DefaultLockWaitTime is how long we block for at a time to check if lock - // acquisition is possible. This affects the minimum time it takes to cancel - // a Lock acquisition. - DefaultLockWaitTime = 15 * time.Second - - // DefaultLockRetryTime is how long we wait after a failed lock acquisition - // before attempting to do the lock again. This is so that once a lock-delay - // is in affect, we do not hot loop retrying the acquisition. - DefaultLockRetryTime = 5 * time.Second - - // LockFlagValue is a magic flag we set to indicate a key - // is being used for a lock. It is used to detect a potential - // conflict with a semaphore. 
- LockFlagValue = 0x2ddccbc058a50c18 -) - -var ( - // ErrLockHeld is returned if we attempt to double lock - ErrLockHeld = fmt.Errorf("Lock already held") - - // ErrLockNotHeld is returned if we attempt to unlock a lock - // that we do not hold. - ErrLockNotHeld = fmt.Errorf("Lock not held") - - // ErrLockInUse is returned if we attempt to destroy a lock - // that is in use. - ErrLockInUse = fmt.Errorf("Lock in use") - - // ErrLockConflict is returned if the flags on a key - // used for a lock do not match expectation - ErrLockConflict = fmt.Errorf("Existing key does not match lock use") -) - -// Lock is used to implement client-side leader election. It is follows the -// algorithm as described here: https://consul.io/docs/guides/leader-election.html. -type Lock struct { - c *Client - opts *LockOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// LockOptions is used to parameterize the Lock behavior. -type LockOptions struct { - Key string // Must be set and have write permissions - Value []byte // Optional, value to associate with the lock - Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultLockSessionName - SessionTTL string // Optional, defaults to DefaultLockSessionTTL -} - -// LockKey returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockKey(key string) (*Lock, error) { - opts := &LockOptions{ - Key: key, - } - return c.LockOpts(opts) -} - -// LockOpts returns a handle to a lock struct which can be used -// to acquire and release the mutex. The key used must have -// write permissions. -func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { - if opts.Key == "" { - return nil, fmt.Errorf("missing key") - } - if opts.SessionName == "" { - opts.SessionName = DefaultLockSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultLockSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - l := &Lock{ - c: c, - opts: opts, - } - return l, nil -} - -// Lock attempts to acquire the lock and blocks while doing so. -// Providing a non-nil stopCh can be used to abort the lock attempt. -// Returns a channel that is closed if our lock is lost or an error. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the lock is held until Unlock() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the lock being lost. 
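Given the semantics described in the comment above, a hedged sketch of how a caller would drive this Lock API (illustrative only; the key name is an arbitrary example and the *api.Client is assumed to come from client code outside this hunk):

    package example

    import (
        "log"

        "github.com/hashicorp/consul/api"
    )

    // runAsLeader blocks until the lock is held, runs work, then releases the lock.
    func runAsLeader(client *api.Client, work func(lost <-chan struct{})) error {
        lock, err := client.LockKey("service/my-app/leader") // hypothetical key
        if err != nil {
            return err
        }
        // With a nil stopCh this blocks until the lock is acquired.
        lostCh, err := lock.Lock(nil)
        if err != nil {
            return err
        }
        defer func() {
            if err := lock.Unlock(); err != nil {
                log.Printf("unlock failed: %v", err)
            }
        }()
        // lostCh is closed if the session is invalidated and leadership is lost,
        // so work should watch it and stop promptly.
        work(lostCh)
        return nil
    }
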
-func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return nil, ErrLockHeld - } - - // Check if we need to create a session first - l.lockSession = l.opts.Session - if l.lockSession == "" { - if s, err := l.createSession(); err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } else { - l.sessionRenew = make(chan struct{}) - l.lockSession = s - session := l.c.Session() - go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !l.isHeld { - close(l.sessionRenew) - l.sessionRenew = nil - } - }() - } - } - - // Setup the query options - kv := l.c.KV() - qOpts := &QueryOptions{ - WaitTime: DefaultLockWaitTime, - } - -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Look for an existing lock, blocking until not taken - pair, meta, err := kv.Get(l.opts.Key, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read lock: %v", err) - } - if pair != nil && pair.Flags != LockFlagValue { - return nil, ErrLockConflict - } - locked := false - if pair != nil && pair.Session == l.lockSession { - goto HELD - } - if pair != nil && pair.Session != "" { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Try to acquire the lock - pair = l.lockEntry(l.lockSession) - locked, _, err = kv.Acquire(pair, nil) - if err != nil { - return nil, fmt.Errorf("failed to acquire lock: %v", err) - } - - // Handle the case of not getting the lock - if !locked { - // Determine why the lock failed - qOpts.WaitIndex = 0 - pair, meta, err = kv.Get(l.opts.Key, qOpts) - if pair != nil && pair.Session != "" { - //If the session is not null, this means that a wait can safely happen - //using a long poll - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } else { - // If the session is empty and the lock failed to acquire, then it means - // a lock-delay is in effect and a timed wait must be used - select { - case <-time.After(DefaultLockRetryTime): - goto WAIT - case <-stopCh: - return nil, nil - } - } - } - -HELD: - // Watch to ensure we maintain leadership - leaderCh := make(chan struct{}) - go l.monitorLock(l.lockSession, leaderCh) - - // Set that we own the lock - l.isHeld = true - - // Locked! All done - return leaderCh, nil -} - -// Unlock released the lock. It is an error to call this -// if the lock is not currently held. -func (l *Lock) Unlock() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Ensure the lock is actually held - if !l.isHeld { - return ErrLockNotHeld - } - - // Set that we no longer own the lock - l.isHeld = false - - // Stop the session renew - if l.sessionRenew != nil { - defer func() { - close(l.sessionRenew) - l.sessionRenew = nil - }() - } - - // Get the lock entry, and clear the lock session - lockEnt := l.lockEntry(l.lockSession) - l.lockSession = "" - - // Release the lock explicitly - kv := l.c.KV() - _, _, err := kv.Release(lockEnt, nil) - if err != nil { - return fmt.Errorf("failed to release lock: %v", err) - } - return nil -} - -// Destroy is used to cleanup the lock entry. It is not necessary -// to invoke. It will fail if the lock is in use. 
-func (l *Lock) Destroy() error { - // Hold the lock as we try to release - l.l.Lock() - defer l.l.Unlock() - - // Check if we already hold the lock - if l.isHeld { - return ErrLockHeld - } - - // Look for an existing lock - kv := l.c.KV() - pair, _, err := kv.Get(l.opts.Key, nil) - if err != nil { - return fmt.Errorf("failed to read lock: %v", err) - } - - // Nothing to do if the lock does not exist - if pair == nil { - return nil - } - - // Check for possible flag conflict - if pair.Flags != LockFlagValue { - return ErrLockConflict - } - - // Check if it is in use - if pair.Session != "" { - return ErrLockInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(pair, nil) - if err != nil { - return fmt.Errorf("failed to remove lock: %v", err) - } - if !didRemove { - return ErrLockInUse - } - return nil -} - -// createSession is used to create a new managed session -func (l *Lock) createSession() (string, error) { - session := l.c.Session() - se := &SessionEntry{ - Name: l.opts.SessionName, - TTL: l.opts.SessionTTL, - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// lockEntry returns a formatted KVPair for the lock -func (l *Lock) lockEntry(session string) *KVPair { - return &KVPair{ - Key: l.opts.Key, - Value: l.opts.Value, - Session: session, - Flags: LockFlagValue, - } -} - -// monitorLock is a long running routine to monitor a lock ownership -// It closes the stopCh if we lose our leadership. -func (l *Lock) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := l.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - pair, meta, err := kv.Get(l.opts.Key, opts) - if err != nil { - return - } - if pair != nil && pair.Session == session { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go deleted file mode 100644 index 745a208c9..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/raw.go +++ /dev/null @@ -1,24 +0,0 @@ -package api - -// Raw can be used to do raw queries against custom endpoints -type Raw struct { - c *Client -} - -// Raw returns a handle to query endpoints -func (c *Client) Raw() *Raw { - return &Raw{c} -} - -// Query is used to do a GET request against an endpoint -// and deserialize the response into an interface using -// standard Consul conventions. -func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) { - return raw.c.query(endpoint, out, q) -} - -// Write is used to do a PUT request against an endpoint -// and serialize/deserialized using the standard Consul conventions. -func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) { - return raw.c.write(endpoint, in, out, q) -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go deleted file mode 100644 index 4e70be2e7..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/semaphore.go +++ /dev/null @@ -1,477 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "path" - "sync" - "time" -) - -const ( - // DefaultSemaphoreSessionName is the Session Name we assign if none is provided - DefaultSemaphoreSessionName = "Consul API Semaphore" - - // DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided - // when creating a new Semaphore. 
This is used because we do not have another - // other check to depend upon. - DefaultSemaphoreSessionTTL = "15s" - - // DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore - // acquisition is possible. This affects the minimum time it takes to cancel - // a Semaphore acquisition. - DefaultSemaphoreWaitTime = 15 * time.Second - - // DefaultSemaphoreKey is the key used within the prefix to - // use for coordination between all the contenders. - DefaultSemaphoreKey = ".lock" - - // SemaphoreFlagValue is a magic flag we set to indicate a key - // is being used for a semaphore. It is used to detect a potential - // conflict with a lock. - SemaphoreFlagValue = 0xe0f69a2baa414de0 -) - -var ( - // ErrSemaphoreHeld is returned if we attempt to double lock - ErrSemaphoreHeld = fmt.Errorf("Semaphore already held") - - // ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore - // that we do not hold. - ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held") - - // ErrSemaphoreInUse is returned if we attempt to destroy a semaphore - // that is in use. - ErrSemaphoreInUse = fmt.Errorf("Semaphore in use") - - // ErrSemaphoreConflict is returned if the flags on a key - // used for a semaphore do not match expectation - ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use") -) - -// Semaphore is used to implement a distributed semaphore -// using the Consul KV primitives. -type Semaphore struct { - c *Client - opts *SemaphoreOptions - - isHeld bool - sessionRenew chan struct{} - lockSession string - l sync.Mutex -} - -// SemaphoreOptions is used to parameterize the Semaphore -type SemaphoreOptions struct { - Prefix string // Must be set and have write permissions - Limit int // Must be set, and be positive - Value []byte // Optional, value to associate with the contender entry - Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultLockSessionName - SessionTTL string // Optional, defaults to DefaultLockSessionTTL -} - -// semaphoreLock is written under the DefaultSemaphoreKey and -// is used to coordinate between all the contenders. -type semaphoreLock struct { - // Limit is the integer limit of holders. This is used to - // verify that all the holders agree on the value. - Limit int - - // Holders is a list of all the semaphore holders. - // It maps the session ID to true. It is used as a set effectively. - Holders map[string]bool -} - -// SemaphorePrefix is used to created a Semaphore which will operate -// at the given KV prefix and uses the given limit for the semaphore. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. -func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) { - opts := &SemaphoreOptions{ - Prefix: prefix, - Limit: limit, - } - return c.SemaphoreOpts(opts) -} - -// SemaphoreOpts is used to create a Semaphore with the given options. -// The prefix must have write privileges, and the limit must be agreed -// upon by all contenders. If a Session is not provided, one will be created. 
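For symmetry with the Lock sketch above, a minimal illustration of the Semaphore API (not part of the removed file; the prefix and limit are example values and the *api.Client is assumed from client code outside this hunk):

    package example

    import (
        "log"

        "github.com/hashicorp/consul/api"
    )

    // withSlot acquires one of two semaphore slots under the prefix, runs fn,
    // and releases the slot afterwards.
    func withSlot(client *api.Client, fn func(lost <-chan struct{})) error {
        sema, err := client.SemaphorePrefix("service/my-app/sema", 2) // hypothetical prefix
        if err != nil {
            return err
        }
        // Acquire blocks until one of the 2 slots is free; the returned channel
        // is closed if the slot is later lost (e.g. session invalidation).
        lostCh, err := sema.Acquire(nil)
        if err != nil {
            return err
        }
        defer func() {
            if err := sema.Release(); err != nil {
                log.Printf("release failed: %v", err)
            }
        }()
        fn(lostCh)
        return nil
    }
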
-func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { - if opts.Prefix == "" { - return nil, fmt.Errorf("missing prefix") - } - if opts.Limit <= 0 { - return nil, fmt.Errorf("semaphore limit must be positive") - } - if opts.SessionName == "" { - opts.SessionName = DefaultSemaphoreSessionName - } - if opts.SessionTTL == "" { - opts.SessionTTL = DefaultSemaphoreSessionTTL - } else { - if _, err := time.ParseDuration(opts.SessionTTL); err != nil { - return nil, fmt.Errorf("invalid SessionTTL: %v", err) - } - } - s := &Semaphore{ - c: c, - opts: opts, - } - return s, nil -} - -// Acquire attempts to reserve a slot in the semaphore, blocking until -// success, interrupted via the stopCh or an error is encountered. -// Providing a non-nil stopCh can be used to abort the attempt. -// On success, a channel is returned that represents our slot. -// This channel could be closed at any time due to session invalidation, -// communication errors, operator intervention, etc. It is NOT safe to -// assume that the slot is held until Release() unless the Session is specifically -// created without any associated health checks. By default Consul sessions -// prefer liveness over safety and an application must be able to handle -// the session being lost. -func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return nil, ErrSemaphoreHeld - } - - // Check if we need to create a session first - s.lockSession = s.opts.Session - if s.lockSession == "" { - if sess, err := s.createSession(); err != nil { - return nil, fmt.Errorf("failed to create session: %v", err) - } else { - s.sessionRenew = make(chan struct{}) - s.lockSession = sess - session := s.c.Session() - go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew) - - // If we fail to acquire the lock, cleanup the session - defer func() { - if !s.isHeld { - close(s.sessionRenew) - s.sessionRenew = nil - } - }() - } - } - - // Create the contender entry - kv := s.c.KV() - made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil) - if err != nil || !made { - return nil, fmt.Errorf("failed to make contender entry: %v", err) - } - - // Setup the query options - qOpts := &QueryOptions{ - WaitTime: DefaultSemaphoreWaitTime, - } - -WAIT: - // Check if we should quit - select { - case <-stopCh: - return nil, nil - default: - } - - // Read the prefix - pairs, meta, err := kv.List(s.opts.Prefix, qOpts) - if err != nil { - return nil, fmt.Errorf("failed to read prefix: %v", err) - } - - // Decode the lock - lockPair := s.findLock(pairs) - if lockPair.Flags != SemaphoreFlagValue { - return nil, ErrSemaphoreConflict - } - lock, err := s.decodeLock(lockPair) - if err != nil { - return nil, err - } - - // Verify we agree with the limit - if lock.Limit != s.opts.Limit { - return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)", - lock.Limit, s.opts.Limit) - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if the lock is held - if len(lock.Holders) >= lock.Limit { - qOpts.WaitIndex = meta.LastIndex - goto WAIT - } - - // Create a new lock with us as a holder - lock.Holders[s.lockSession] = true - newLock, err := s.encodeLock(lock, lockPair.ModifyIndex) - if err != nil { - return nil, err - } - - // Attempt the acquisition - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return nil, fmt.Errorf("failed to update 
lock: %v", err) - } - if !didSet { - // Update failed, could have been a race with another contender, - // retry the operation - goto WAIT - } - - // Watch to ensure we maintain ownership of the slot - lockCh := make(chan struct{}) - go s.monitorLock(s.lockSession, lockCh) - - // Set that we own the lock - s.isHeld = true - - // Acquired! All done - return lockCh, nil -} - -// Release is used to voluntarily give up our semaphore slot. It is -// an error to call this if the semaphore has not been acquired. -func (s *Semaphore) Release() error { - // Hold the lock as we try to release - s.l.Lock() - defer s.l.Unlock() - - // Ensure the lock is actually held - if !s.isHeld { - return ErrSemaphoreNotHeld - } - - // Set that we no longer own the lock - s.isHeld = false - - // Stop the session renew - if s.sessionRenew != nil { - defer func() { - close(s.sessionRenew) - s.sessionRenew = nil - }() - } - - // Get and clear the lock session - lockSession := s.lockSession - s.lockSession = "" - - // Remove ourselves as a lock holder - kv := s.c.KV() - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) -READ: - pair, _, err := kv.Get(key, nil) - if err != nil { - return err - } - if pair == nil { - pair = &KVPair{} - } - lock, err := s.decodeLock(pair) - if err != nil { - return err - } - - // Create a new lock without us as a holder - if _, ok := lock.Holders[lockSession]; ok { - delete(lock.Holders, lockSession) - newLock, err := s.encodeLock(lock, pair.ModifyIndex) - if err != nil { - return err - } - - // Swap the locks - didSet, _, err := kv.CAS(newLock, nil) - if err != nil { - return fmt.Errorf("failed to update lock: %v", err) - } - if !didSet { - goto READ - } - } - - // Destroy the contender entry - contenderKey := path.Join(s.opts.Prefix, lockSession) - if _, err := kv.Delete(contenderKey, nil); err != nil { - return err - } - return nil -} - -// Destroy is used to cleanup the semaphore entry. It is not necessary -// to invoke. It will fail if the semaphore is in use. 
-func (s *Semaphore) Destroy() error { - // Hold the lock as we try to acquire - s.l.Lock() - defer s.l.Unlock() - - // Check if we already hold the semaphore - if s.isHeld { - return ErrSemaphoreHeld - } - - // List for the semaphore - kv := s.c.KV() - pairs, _, err := kv.List(s.opts.Prefix, nil) - if err != nil { - return fmt.Errorf("failed to read prefix: %v", err) - } - - // Find the lock pair, bail if it doesn't exist - lockPair := s.findLock(pairs) - if lockPair.ModifyIndex == 0 { - return nil - } - if lockPair.Flags != SemaphoreFlagValue { - return ErrSemaphoreConflict - } - - // Decode the lock - lock, err := s.decodeLock(lockPair) - if err != nil { - return err - } - - // Prune the dead holders - s.pruneDeadHolders(lock, pairs) - - // Check if there are any holders - if len(lock.Holders) > 0 { - return ErrSemaphoreInUse - } - - // Attempt the delete - didRemove, _, err := kv.DeleteCAS(lockPair, nil) - if err != nil { - return fmt.Errorf("failed to remove semaphore: %v", err) - } - if !didRemove { - return ErrSemaphoreInUse - } - return nil -} - -// createSession is used to create a new managed session -func (s *Semaphore) createSession() (string, error) { - session := s.c.Session() - se := &SessionEntry{ - Name: s.opts.SessionName, - TTL: s.opts.SessionTTL, - Behavior: SessionBehaviorDelete, - } - id, _, err := session.Create(se, nil) - if err != nil { - return "", err - } - return id, nil -} - -// contenderEntry returns a formatted KVPair for the contender -func (s *Semaphore) contenderEntry(session string) *KVPair { - return &KVPair{ - Key: path.Join(s.opts.Prefix, session), - Value: s.opts.Value, - Session: session, - Flags: SemaphoreFlagValue, - } -} - -// findLock is used to find the KV Pair which is used for coordination -func (s *Semaphore) findLock(pairs KVPairs) *KVPair { - key := path.Join(s.opts.Prefix, DefaultSemaphoreKey) - for _, pair := range pairs { - if pair.Key == key { - return pair - } - } - return &KVPair{Flags: SemaphoreFlagValue} -} - -// decodeLock is used to decode a semaphoreLock from an -// entry in Consul -func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) { - // Handle if there is no lock - if pair == nil || pair.Value == nil { - return &semaphoreLock{ - Limit: s.opts.Limit, - Holders: make(map[string]bool), - }, nil - } - - l := &semaphoreLock{} - if err := json.Unmarshal(pair.Value, l); err != nil { - return nil, fmt.Errorf("lock decoding failed: %v", err) - } - return l, nil -} - -// encodeLock is used to encode a semaphoreLock into a KVPair -// that can be PUT -func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) { - enc, err := json.Marshal(l) - if err != nil { - return nil, fmt.Errorf("lock encoding failed: %v", err) - } - pair := &KVPair{ - Key: path.Join(s.opts.Prefix, DefaultSemaphoreKey), - Value: enc, - Flags: SemaphoreFlagValue, - ModifyIndex: oldIndex, - } - return pair, nil -} - -// pruneDeadHolders is used to remove all the dead lock holders -func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) { - // Gather all the live holders - alive := make(map[string]struct{}, len(pairs)) - for _, pair := range pairs { - if pair.Session != "" { - alive[pair.Session] = struct{}{} - } - } - - // Remove any holders that are dead - for holder := range lock.Holders { - if _, ok := alive[holder]; !ok { - delete(lock.Holders, holder) - } - } -} - -// monitorLock is a long running routine to monitor a semaphore ownership -// It closes the stopCh if we lose our slot. 
-func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { - defer close(stopCh) - kv := s.c.KV() - opts := &QueryOptions{RequireConsistent: true} -WAIT: - pairs, meta, err := kv.List(s.opts.Prefix, opts) - if err != nil { - return - } - lockPair := s.findLock(pairs) - lock, err := s.decodeLock(lockPair) - if err != nil { - return - } - s.pruneDeadHolders(lock, pairs) - if _, ok := lock.Holders[session]; ok { - opts.WaitIndex = meta.LastIndex - goto WAIT - } -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go deleted file mode 100644 index 574738127..000000000 --- a/Godeps/_workspace/src/github.com/hashicorp/consul/api/session.go +++ /dev/null @@ -1,201 +0,0 @@ -package api - -import ( - "fmt" - "time" -) - -const ( - // SessionBehaviorRelease is the default behavior and causes - // all associated locks to be released on session invalidation. - SessionBehaviorRelease = "release" - - // SessionBehaviorDelete is new in Consul 0.5 and changes the - // behavior to delete all associated locks on session invalidation. - // It can be used in a way similar to Ephemeral Nodes in ZooKeeper. - SessionBehaviorDelete = "delete" -) - -// SessionEntry represents a session in consul -type SessionEntry struct { - CreateIndex uint64 - ID string - Name string - Node string - Checks []string - LockDelay time.Duration - Behavior string - TTL string -} - -// Session can be used to query the Session endpoints -type Session struct { - c *Client -} - -// Session returns a handle to the session endpoints -func (c *Client) Session() *Session { - return &Session{c} -} - -// CreateNoChecks is like Create but is used specifically to create -// a session with no associated health checks. -func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - body := make(map[string]interface{}) - body["Checks"] = []string{} - if se != nil { - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(body, q) - -} - -// Create makes a new session. Providing a session entry can -// customize the session. It can also be nil to use defaults. 
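A short sketch of creating and keeping alive a TTL session with the session API in this file (illustrative only; the name and TTL are example values and the *api.Client is assumed from client code outside this hunk):

    package example

    import (
        "log"

        "github.com/hashicorp/consul/api"
    )

    // newManagedSession creates a TTL session and renews it until doneCh is closed,
    // at which point the session is destroyed.
    func newManagedSession(client *api.Client, doneCh chan struct{}) (string, error) {
        session := client.Session()
        id, _, err := session.Create(&api.SessionEntry{
            Name:     "my-app", // example values
            TTL:      "15s",
            Behavior: api.SessionBehaviorDelete, // delete held locks on invalidation
        }, nil)
        if err != nil {
            return "", err
        }
        // RenewPeriodic blocks (renewing at roughly TTL/2), so run it in a goroutine.
        go func() {
            if err := session.RenewPeriodic("15s", id, nil, doneCh); err != nil {
                log.Printf("session renewal stopped: %v", err)
            }
        }()
        return id, nil
    }
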
-func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) { - var obj interface{} - if se != nil { - body := make(map[string]interface{}) - obj = body - if se.Name != "" { - body["Name"] = se.Name - } - if se.Node != "" { - body["Node"] = se.Node - } - if se.LockDelay != 0 { - body["LockDelay"] = durToMsec(se.LockDelay) - } - if len(se.Checks) > 0 { - body["Checks"] = se.Checks - } - if se.Behavior != "" { - body["Behavior"] = se.Behavior - } - if se.TTL != "" { - body["TTL"] = se.TTL - } - } - return s.create(obj, q) -} - -func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) { - var out struct{ ID string } - wm, err := s.c.write("/v1/session/create", obj, &out, q) - if err != nil { - return "", nil, err - } - return out.ID, wm, nil -} - -// Destroy invalidates a given session -func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) { - wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q) - if err != nil { - return nil, err - } - return wm, nil -} - -// Renew renews the TTL on a given session -func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) { - var entries []*SessionEntry - wm, err := s.c.write("/v1/session/renew/"+id, nil, &entries, q) - if err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], wm, nil - } - return nil, wm, nil -} - -// RenewPeriodic is used to periodically invoke Session.Renew on a -// session until a doneCh is closed. This is meant to be used in a long running -// goroutine to ensure a session stays valid. -func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error { - ttl, err := time.ParseDuration(initialTTL) - if err != nil { - return err - } - - waitDur := ttl / 2 - lastRenewTime := time.Now() - var lastErr error - for { - if time.Since(lastRenewTime) > ttl { - return lastErr - } - select { - case <-time.After(waitDur): - entry, _, err := s.Renew(id, q) - if err != nil { - waitDur = time.Second - lastErr = err - continue - } - if entry == nil { - waitDur = time.Second - lastErr = fmt.Errorf("No SessionEntry returned") - continue - } - - // Handle the server updating the TTL - ttl, _ = time.ParseDuration(entry.TTL) - waitDur = ttl / 2 - lastRenewTime = time.Now() - - case <-doneCh: - // Attempt a session destroy - s.Destroy(id, q) - return nil - } - } -} - -// Info looks up a single session -func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/info/"+id, &entries, q) - if err != nil { - return nil, nil, err - } - if len(entries) > 0 { - return entries[0], qm, nil - } - return nil, qm, nil -} - -// List gets sessions for a node -func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/node/"+node, &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} - -// List gets all active sessions -func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) { - var entries []*SessionEntry - qm, err := s.c.query("/v1/session/list", &entries, q) - if err != nil { - return nil, nil, err - } - return entries, qm, nil -} diff --git a/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go b/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go deleted file mode 100644 index 74ef61a67..000000000 --- 
a/Godeps/_workspace/src/github.com/hashicorp/consul/api/status.go +++ /dev/null @@ -1,43 +0,0 @@ -package api - -// Status can be used to query the Status endpoints -type Status struct { - c *Client -} - -// Status returns a handle to the status endpoints -func (c *Client) Status() *Status { - return &Status{c} -} - -// Leader is used to query for a known leader -func (s *Status) Leader() (string, error) { - r := s.c.newRequest("GET", "/v1/status/leader") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return "", err - } - defer resp.Body.Close() - - var leader string - if err := decodeBody(resp, &leader); err != nil { - return "", err - } - return leader, nil -} - -// Peers is used to query for a known raft peers -func (s *Status) Peers() ([]string, error) { - r := s.c.newRequest("GET", "/v1/status/peers") - _, resp, err := requireOK(s.c.doRequest(r)) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var peers []string - if err := decodeBody(resp, &peers); err != nil { - return nil, err - } - return peers, nil -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/cbreaker.go b/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/cbreaker.go deleted file mode 100644 index f665b32b3..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/cbreaker.go +++ /dev/null @@ -1,359 +0,0 @@ -// package cbreaker implements circuit breaker similar to https://github.com/Netflix/Hystrix/wiki/How-it-Works -// -// Vulcan circuit breaker watches the error condtion to match -// after which it activates the fallback scenario, e.g. returns the response code -// or redirects the request to another location - -// Circuit breakers start in the Standby state first, observing responses and watching location metrics. -// -// Once the Circuit breaker condition is met, it enters the "Tripped" state, where it activates fallback scenario -// for all requests during the FallbackDuration time period and reset the stats for the location. -// -// After FallbackDuration time period passes, Circuit breaker enters "Recovering" state, during that state it will -// start passing some traffic back to the endpoints, increasing the amount of passed requests using linear function: -// -// allowedRequestsRatio = 0.5 * (Now() - StartRecovery())/RecoveryDuration -// -// Two scenarios are possible in the "Recovering" state: -// 1. Condition matches again, this will reset the state to "Tripped" and reset the timer. -// 2. Condition does not match, circuit breaker enters "Standby" state -// -// It is possible to define actions (e.g. 
webhooks) of transitions between states: -// -// * OnTripped action is called on transition (Standby -> Tripped) -// * OnStandby action is called on transition (Recovering -> Standby) -// -package cbreaker - -import ( - "fmt" - "net/http" - "sync" - "time" - - "github.com/mailgun/oxy/memmetrics" - "github.com/mailgun/oxy/utils" - "github.com/mailgun/timetools" -) - -// CircuitBreaker is http.Handler that implements circuit breaker pattern -type CircuitBreaker struct { - m *sync.RWMutex - metrics *memmetrics.RTMetrics - - condition hpredicate - duration time.Duration - - fallbackDuration time.Duration - recoveryDuration time.Duration - - onTripped SideEffect - onStandby SideEffect - - state cbState - until time.Time - - rc *ratioController - - checkPeriod time.Duration - lastCheck time.Time - - fallback http.Handler - next http.Handler - - log utils.Logger - clock timetools.TimeProvider -} - -// New creates a new CircuitBreaker middleware -func New(next http.Handler, expression string, options ...CircuitBreakerOption) (*CircuitBreaker, error) { - cb := &CircuitBreaker{ - m: &sync.RWMutex{}, - next: next, - // Default values. Might be overwritten by options below. - clock: &timetools.RealTime{}, - checkPeriod: defaultCheckPeriod, - fallbackDuration: defaultFallbackDuration, - recoveryDuration: defaultRecoveryDuration, - fallback: defaultFallback, - log: utils.NullLogger, - } - - for _, s := range options { - if err := s(cb); err != nil { - return nil, err - } - } - - condition, err := parseExpression(expression) - if err != nil { - return nil, err - } - cb.condition = condition - - mt, err := memmetrics.NewRTMetrics() - if err != nil { - return nil, err - } - cb.metrics = mt - - return cb, nil -} - -func (c *CircuitBreaker) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if c.activateFallback(w, req) { - c.fallback.ServeHTTP(w, req) - return - } - c.serve(w, req) -} - -func (c *CircuitBreaker) Wrap(next http.Handler) { - c.next = next -} - -// updateState updates internal state and returns true if fallback should be used and false otherwise -func (c *CircuitBreaker) activateFallback(w http.ResponseWriter, req *http.Request) bool { - // Quick check with read locks optimized for normal operation use-case - if c.isStandby() { - return false - } - // Circuit breaker is in tripped or recovering state - c.m.Lock() - defer c.m.Unlock() - - c.log.Infof("%v is in error state", c) - - switch c.state { - case stateStandby: - // someone else has set it to standby just now - return false - case stateTripped: - if c.clock.UtcNow().Before(c.until) { - return true - } - // We have been in active state enough, enter recovering state - c.setRecovering() - fallthrough - case stateRecovering: - // We have been in recovering state enough, enter standby and allow request - if c.clock.UtcNow().After(c.until) { - c.setState(stateStandby, c.clock.UtcNow()) - return false - } - // ratio controller allows this request - if c.rc.allowRequest() { - return false - } - return true - } - return false -} - -func (c *CircuitBreaker) serve(w http.ResponseWriter, req *http.Request) { - start := c.clock.UtcNow() - p := &utils.ProxyWriter{W: w} - - c.next.ServeHTTP(p, req) - - latency := c.clock.UtcNow().Sub(start) - c.metrics.Record(p.Code, latency) - - // Note that this call is less expensive than it looks -- checkCondition only performs the real check - // periodically. Because of that we can afford to call it here on every single response. 
- c.checkAndSet() -} - -func (c *CircuitBreaker) isStandby() bool { - c.m.RLock() - defer c.m.RUnlock() - return c.state == stateStandby -} - -// String returns log-friendly representation of the circuit breaker state -func (c *CircuitBreaker) String() string { - switch c.state { - case stateTripped, stateRecovering: - return fmt.Sprintf("CircuitBreaker(state=%v, until=%v)", c.state, c.until) - default: - return fmt.Sprintf("CircuitBreaker(state=%v)", c.state) - } -} - -// exec executes side effect -func (c *CircuitBreaker) exec(s SideEffect) { - if s == nil { - return - } - go func() { - if err := s.Exec(); err != nil { - c.log.Errorf("%v side effect failure: %v", c, err) - } - }() -} - -func (c *CircuitBreaker) setState(new cbState, until time.Time) { - c.log.Infof("%v setting state to %v, until %v", c, new, until) - c.state = new - c.until = until - switch new { - case stateTripped: - c.exec(c.onTripped) - case stateStandby: - c.exec(c.onStandby) - } -} - -func (c *CircuitBreaker) timeToCheck() bool { - c.m.RLock() - defer c.m.RUnlock() - return c.clock.UtcNow().After(c.lastCheck) -} - -// Checks if tripping condition matches and sets circuit breaker to the tripped state -func (c *CircuitBreaker) checkAndSet() { - if !c.timeToCheck() { - return - } - - c.m.Lock() - defer c.m.Unlock() - - // Other goroutine could have updated the lastCheck variable before we grabbed mutex - if !c.clock.UtcNow().After(c.lastCheck) { - return - } - c.lastCheck = c.clock.UtcNow().Add(c.checkPeriod) - - if c.state == stateTripped { - c.log.Infof("%v skip set tripped", c) - return - } - - if !c.condition(c) { - return - } - - c.setState(stateTripped, c.clock.UtcNow().Add(c.fallbackDuration)) - c.metrics.Reset() -} - -func (c *CircuitBreaker) setRecovering() { - c.setState(stateRecovering, c.clock.UtcNow().Add(c.recoveryDuration)) - c.rc = newRatioController(c.clock, c.recoveryDuration) -} - -// CircuitBreakerOption represents an option you can pass to New. -// See the documentation for the individual options below. -type CircuitBreakerOption func(*CircuitBreaker) error - -// Clock allows you to fake che CircuitBreaker's view of the current time. -// Intended for unit tests. -func Clock(clock timetools.TimeProvider) CircuitBreakerOption { - return func(c *CircuitBreaker) error { - c.clock = clock - return nil - } -} - -// FallbackDuration is how long the CircuitBreaker will remain in the Tripped -// state before trying to recover. -func FallbackDuration(d time.Duration) CircuitBreakerOption { - return func(c *CircuitBreaker) error { - c.fallbackDuration = d - return nil - } -} - -// RecoveryDuration is how long the CircuitBreaker will take to ramp up -// requests during the Recovering state. -func RecoveryDuration(d time.Duration) CircuitBreakerOption { - return func(c *CircuitBreaker) error { - c.recoveryDuration = d - return nil - } -} - -// CheckPeriod is how long the CircuitBreaker will wait between successive -// checks of the breaker condition. -func CheckPeriod(d time.Duration) CircuitBreakerOption { - return func(c *CircuitBreaker) error { - c.checkPeriod = d - return nil - } -} - -// OnTripped sets a SideEffect to run when entering the Tripped state. -// Only one SideEffect can be set for this hook. -func OnTripped(s SideEffect) CircuitBreakerOption { - return func(c *CircuitBreaker) error { - c.onTripped = s - return nil - } -} - -// OnTripped sets a SideEffect to run when entering the Standby state. -// Only one SideEffect can be set for this hook. 
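A hedged sketch of wiring this middleware in front of an http.Handler using the option setters above (the tripping expression relies on the predicate functions defined in predicates.go, removed further down in this patch; the listen address and thresholds are example values):

    package main

    import (
        "log"
        "net/http"
        "time"

        "github.com/mailgun/oxy/cbreaker"
    )

    func main() {
        backend := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            w.Write([]byte("ok"))
        })
        // Trip when more than half of the observed requests fail at the network
        // level, and serve the fallback for 20 seconds before trying to recover.
        cb, err := cbreaker.New(backend, "NetworkErrorRatio() > 0.5",
            cbreaker.FallbackDuration(20*time.Second))
        if err != nil {
            log.Fatal(err)
        }
        log.Fatal(http.ListenAndServe(":8080", cb))
    }
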
-func OnStandby(s SideEffect) CircuitBreakerOption { - return func(c *CircuitBreaker) error { - c.onStandby = s - return nil - } -} - -// Fallback defines the http.Handler that the CircuitBreaker should route -// requests to when it prevents a request from taking its normal path. -func Fallback(h http.Handler) CircuitBreakerOption { - return func(c *CircuitBreaker) error { - c.fallback = h - return nil - } -} - -// Logger adds logging for the CircuitBreaker. -func Logger(l utils.Logger) CircuitBreakerOption { - return func(c *CircuitBreaker) error { - c.log = l - return nil - } -} - -// cbState is the state of the circuit breaker -type cbState int - -func (s cbState) String() string { - switch s { - case stateStandby: - return "standby" - case stateTripped: - return "tripped" - case stateRecovering: - return "recovering" - } - return "undefined" -} - -const ( - // CircuitBreaker is passing all requests and watching stats - stateStandby = iota - // CircuitBreaker activates fallback scenario for all requests - stateTripped - // CircuitBreaker passes some requests to go through, rejecting others - stateRecovering -) - -const ( - defaultFallbackDuration = 10 * time.Second - defaultRecoveryDuration = 10 * time.Second - defaultCheckPeriod = 100 * time.Millisecond -) - -var defaultFallback = &fallback{} - -type fallback struct { -} - -func (f *fallback) ServeHTTP(w http.ResponseWriter, req *http.Request) { - w.WriteHeader(http.StatusServiceUnavailable) - w.Write([]byte(http.StatusText(http.StatusServiceUnavailable))) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/effect.go b/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/effect.go deleted file mode 100644 index be972dae9..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/effect.go +++ /dev/null @@ -1,78 +0,0 @@ -package cbreaker - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/mailgun/log" - "github.com/mailgun/oxy/utils" -) - -type SideEffect interface { - Exec() error -} - -type Webhook struct { - URL string - Method string - Headers http.Header - Form url.Values - Body []byte -} - -type WebhookSideEffect struct { - w Webhook -} - -func NewWebhookSideEffect(w Webhook) (*WebhookSideEffect, error) { - if w.Method == "" { - return nil, fmt.Errorf("Supply method") - } - _, err := url.Parse(w.URL) - if err != nil { - return nil, err - } - - return &WebhookSideEffect{w: w}, nil -} - -func (w *WebhookSideEffect) getBody() io.Reader { - if len(w.w.Form) != 0 { - return strings.NewReader(w.w.Form.Encode()) - } - if len(w.w.Body) != 0 { - return bytes.NewBuffer(w.w.Body) - } - return nil -} - -func (w *WebhookSideEffect) Exec() error { - r, err := http.NewRequest(w.w.Method, w.w.URL, w.getBody()) - if err != nil { - return err - } - if len(w.w.Headers) != 0 { - utils.CopyHeaders(r.Header, w.w.Headers) - } - if len(w.w.Form) != 0 { - r.Header.Set("Content-Type", "application/x-www-form-urlencoded") - } - re, err := http.DefaultClient.Do(r) - if err != nil { - return err - } - if re.Body != nil { - defer re.Body.Close() - } - body, err := ioutil.ReadAll(re.Body) - if err != nil { - return err - } - log.Infof("%v got response: (%s): %s", w, re.Status, string(body)) - return nil -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/fallback.go b/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/fallback.go deleted file mode 100644 index 9bd6678c1..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/fallback.go +++ 
/dev/null @@ -1,56 +0,0 @@ -package cbreaker - -import ( - "fmt" - "net/http" - "net/url" - "strconv" -) - -type Response struct { - StatusCode int - ContentType string - Body []byte -} - -type ResponseFallback struct { - r Response -} - -func NewResponseFallback(r Response) (*ResponseFallback, error) { - if r.StatusCode == 0 { - return nil, fmt.Errorf("response code should not be 0") - } - return &ResponseFallback{r: r}, nil -} - -func (f *ResponseFallback) ServeHTTP(w http.ResponseWriter, req *http.Request) { - if f.r.ContentType != "" { - w.Header().Set("Content-Type", f.r.ContentType) - } - w.Header().Set("Content-Length", strconv.Itoa(len(f.r.Body))) - w.WriteHeader(f.r.StatusCode) - w.Write(f.r.Body) -} - -type Redirect struct { - URL string -} - -type RedirectFallback struct { - u *url.URL -} - -func NewRedirectFallback(r Redirect) (*RedirectFallback, error) { - u, err := url.ParseRequestURI(r.URL) - if err != nil { - return nil, err - } - return &RedirectFallback{u: u}, nil -} - -func (f *RedirectFallback) ServeHTTP(w http.ResponseWriter, req *http.Request) { - w.Header().Set("Location", f.u.String()) - w.WriteHeader(http.StatusFound) - w.Write([]byte(http.StatusText(http.StatusFound))) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/predicates.go b/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/predicates.go deleted file mode 100644 index c21c374f4..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/predicates.go +++ /dev/null @@ -1,232 +0,0 @@ -package cbreaker - -import ( - "fmt" - "time" - - "github.com/mailgun/predicate" -) - -type hpredicate func(*CircuitBreaker) bool - -// parseExpression parses expression in the go language into predicates. -func parseExpression(in string) (hpredicate, error) { - p, err := predicate.NewParser(predicate.Def{ - Operators: predicate.Operators{ - AND: and, - OR: or, - EQ: eq, - NEQ: neq, - LT: lt, - LE: le, - GT: gt, - GE: ge, - }, - Functions: map[string]interface{}{ - "LatencyAtQuantileMS": latencyAtQuantile, - "NetworkErrorRatio": networkErrorRatio, - "ResponseCodeRatio": responseCodeRatio, - }, - }) - if err != nil { - return nil, err - } - out, err := p.Parse(in) - if err != nil { - return nil, err - } - pr, ok := out.(hpredicate) - if !ok { - return nil, fmt.Errorf("expected predicate, got %T", out) - } - return pr, nil -} - -type toInt func(c *CircuitBreaker) int -type toFloat64 func(c *CircuitBreaker) float64 - -func latencyAtQuantile(quantile float64) toInt { - return func(c *CircuitBreaker) int { - h, err := c.metrics.LatencyHistogram() - if err != nil { - c.log.Errorf("Failed to get latency histogram, for %v error: %v", c, err) - return 0 - } - return int(h.LatencyAtQuantile(quantile) / time.Millisecond) - } -} - -func networkErrorRatio() toFloat64 { - return func(c *CircuitBreaker) float64 { - return c.metrics.NetworkErrorRatio() - } -} - -func responseCodeRatio(startA, endA, startB, endB int) toFloat64 { - return func(c *CircuitBreaker) float64 { - return c.metrics.ResponseCodeRatio(startA, endA, startB, endB) - } -} - -// or returns predicate by joining the passed predicates with logical 'or' -func or(fns ...hpredicate) hpredicate { - return func(c *CircuitBreaker) bool { - for _, fn := range fns { - if fn(c) { - return true - } - } - return false - } -} - -// and returns predicate by joining the passed predicates with logical 'and' -func and(fns ...hpredicate) hpredicate { - return func(c *CircuitBreaker) bool { - for _, fn := range fns { - if !fn(c) { - return false - } - } - 
return true - } -} - -// not creates negation of the passed predicate -func not(p hpredicate) hpredicate { - return func(c *CircuitBreaker) bool { - return !p(c) - } -} - -// eq returns predicate that tests for equality of the value of the mapper and the constant -func eq(m interface{}, value interface{}) (hpredicate, error) { - switch mapper := m.(type) { - case toInt: - return intEQ(mapper, value) - case toFloat64: - return float64EQ(mapper, value) - } - return nil, fmt.Errorf("eq: unsupported argument: %T", m) -} - -// neq returns predicate that tests for inequality of the value of the mapper and the constant -func neq(m interface{}, value interface{}) (hpredicate, error) { - p, err := eq(m, value) - if err != nil { - return nil, err - } - return not(p), nil -} - -// lt returns predicate that tests that value of the mapper function is less than the constant -func lt(m interface{}, value interface{}) (hpredicate, error) { - switch mapper := m.(type) { - case toInt: - return intLT(mapper, value) - case toFloat64: - return float64LT(mapper, value) - } - return nil, fmt.Errorf("lt: unsupported argument: %T", m) -} - -// le returns predicate that tests that value of the mapper function is less or equal than the constant -func le(m interface{}, value interface{}) (hpredicate, error) { - l, err := lt(m, value) - if err != nil { - return nil, err - } - e, err := eq(m, value) - if err != nil { - return nil, err - } - return func(c *CircuitBreaker) bool { - return l(c) || e(c) - }, nil -} - -// gt returns predicate that tests that value of the mapper function is greater than the constant -func gt(m interface{}, value interface{}) (hpredicate, error) { - switch mapper := m.(type) { - case toInt: - return intGT(mapper, value) - case toFloat64: - return float64GT(mapper, value) - } - return nil, fmt.Errorf("gt: unsupported argument: %T", m) -} - -// ge returns predicate that tests that value of the mapper function is less or equal than the constant -func ge(m interface{}, value interface{}) (hpredicate, error) { - g, err := gt(m, value) - if err != nil { - return nil, err - } - e, err := eq(m, value) - if err != nil { - return nil, err - } - return func(c *CircuitBreaker) bool { - return g(c) || e(c) - }, nil -} - -func intEQ(m toInt, val interface{}) (hpredicate, error) { - value, ok := val.(int) - if !ok { - return nil, fmt.Errorf("expected int, got %T", val) - } - return func(c *CircuitBreaker) bool { - return m(c) == value - }, nil -} - -func float64EQ(m toFloat64, val interface{}) (hpredicate, error) { - value, ok := val.(float64) - if !ok { - return nil, fmt.Errorf("expected float64, got %T", val) - } - return func(c *CircuitBreaker) bool { - return m(c) == value - }, nil -} - -func intLT(m toInt, val interface{}) (hpredicate, error) { - value, ok := val.(int) - if !ok { - return nil, fmt.Errorf("expected int, got %T", val) - } - return func(c *CircuitBreaker) bool { - return m(c) < value - }, nil -} - -func intGT(m toInt, val interface{}) (hpredicate, error) { - value, ok := val.(int) - if !ok { - return nil, fmt.Errorf("expected int, got %T", val) - } - return func(c *CircuitBreaker) bool { - return m(c) > value - }, nil -} - -func float64LT(m toFloat64, val interface{}) (hpredicate, error) { - value, ok := val.(float64) - if !ok { - return nil, fmt.Errorf("expected int, got %T", val) - } - return func(c *CircuitBreaker) bool { - return m(c) < value - }, nil -} - -func float64GT(m toFloat64, val interface{}) (hpredicate, error) { - value, ok := val.(float64) - if !ok { - return nil, 
fmt.Errorf("expected int, got %T", val) - } - return func(c *CircuitBreaker) bool { - return m(c) > value - }, nil -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/ratio.go b/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/ratio.go deleted file mode 100644 index cedf21f57..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/cbreaker/ratio.go +++ /dev/null @@ -1,70 +0,0 @@ -package cbreaker - -import ( - "fmt" - "time" - - "github.com/mailgun/log" - "github.com/mailgun/timetools" -) - -// ratioController allows passing portions traffic back to the endpoints, -// increasing the amount of passed requests using linear function: -// -// allowedRequestsRatio = 0.5 * (Now() - Start())/Duration -// -type ratioController struct { - duration time.Duration - start time.Time - tm timetools.TimeProvider - allowed int - denied int -} - -func newRatioController(tm timetools.TimeProvider, rampUp time.Duration) *ratioController { - return &ratioController{ - duration: rampUp, - tm: tm, - start: tm.UtcNow(), - } -} - -func (r *ratioController) String() string { - return fmt.Sprintf("RatioController(target=%f, current=%f, allowed=%d, denied=%d)", r.targetRatio(), r.computeRatio(r.allowed, r.denied), r.allowed, r.denied) -} - -func (r *ratioController) allowRequest() bool { - log.Infof("%v", r) - t := r.targetRatio() - // This condition answers the question - would we satisfy the target ratio if we allow this request? - e := r.computeRatio(r.allowed+1, r.denied) - if e < t { - r.allowed++ - log.Infof("%v allowed", r) - return true - } - r.denied++ - log.Infof("%v denied", r) - return false -} - -func (r *ratioController) computeRatio(allowed, denied int) float64 { - if denied+allowed == 0 { - return 0 - } - return float64(allowed) / float64(denied+allowed) -} - -func (r *ratioController) targetRatio() float64 { - // Here's why it's 0.5: - // We are watching the following ratio - // ratio = a / (a + d) - // We can notice, that once we get to 0.5 - // 0.5 = a / (a + d) - // we can evaluate that a = d - // that means equilibrium, where we would allow all the requests - // after this point to achieve ratio of 1 (that can never be reached unless d is 0) - // so we stop from there - multiplier := 0.5 / float64(r.duration) - return multiplier * float64(r.tm.UtcNow().Sub(r.start)) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/forward/fwd.go b/Godeps/_workspace/src/github.com/mailgun/oxy/forward/fwd.go deleted file mode 100644 index 08d866600..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/forward/fwd.go +++ /dev/null @@ -1,151 +0,0 @@ -// package forwarder implements http handler that forwards requests to remote server -// and serves back the response -package forward - -import ( - "io" - "net/http" - "net/url" - "os" - "strconv" - "time" - - "github.com/mailgun/oxy/utils" -) - -// ReqRewriter can alter request headers and body -type ReqRewriter interface { - Rewrite(r *http.Request) -} - -type optSetter func(f *Forwarder) error - -func PassHostHeader(b bool) optSetter { - return func(f *Forwarder) error { - f.passHost = b - return nil - } -} - -func RoundTripper(r http.RoundTripper) optSetter { - return func(f *Forwarder) error { - f.roundTripper = r - return nil - } -} - -func Rewriter(r ReqRewriter) optSetter { - return func(f *Forwarder) error { - f.rewriter = r - return nil - } -} - -// ErrorHandler is a functional argument that sets error handler of the server -func ErrorHandler(h utils.ErrorHandler) optSetter { - return func(f *Forwarder) error { 
- f.errHandler = h - return nil - } -} - -func Logger(l utils.Logger) optSetter { - return func(f *Forwarder) error { - f.log = l - return nil - } -} - -type Forwarder struct { - errHandler utils.ErrorHandler - roundTripper http.RoundTripper - rewriter ReqRewriter - log utils.Logger - passHost bool -} - -func New(setters ...optSetter) (*Forwarder, error) { - f := &Forwarder{} - for _, s := range setters { - if err := s(f); err != nil { - return nil, err - } - } - if f.roundTripper == nil { - f.roundTripper = http.DefaultTransport - } - if f.rewriter == nil { - h, err := os.Hostname() - if err != nil { - h = "localhost" - } - f.rewriter = &HeaderRewriter{TrustForwardHeader: true, Hostname: h} - } - if f.log == nil { - f.log = utils.NullLogger - } - if f.errHandler == nil { - f.errHandler = utils.DefaultHandler - } - return f, nil -} - -func (f *Forwarder) ServeHTTP(w http.ResponseWriter, req *http.Request) { - start := time.Now().UTC() - response, err := f.roundTripper.RoundTrip(f.copyRequest(req, req.URL)) - if err != nil { - f.log.Errorf("Error forwarding to %v, err: %v", req.URL, err) - f.errHandler.ServeHTTP(w, req, err) - return - } - - if req.TLS != nil { - f.log.Infof("Round trip: %v, code: %v, duration: %v tls:version: %x, tls:resume:%t, tls:csuite:%x, tls:server:%v", - req.URL, response.StatusCode, time.Now().UTC().Sub(start), - req.TLS.Version, - req.TLS.DidResume, - req.TLS.CipherSuite, - req.TLS.ServerName) - } else { - f.log.Infof("Round trip: %v, code: %v, duration: %v", - req.URL, response.StatusCode, time.Now().UTC().Sub(start)) - } - - utils.CopyHeaders(w.Header(), response.Header) - w.WriteHeader(response.StatusCode) - written, _ := io.Copy(w, response.Body) - if written != 0 { - w.Header().Set(ContentLength, strconv.FormatInt(written, 10)) - } - response.Body.Close() -} - -func (f *Forwarder) copyRequest(req *http.Request, u *url.URL) *http.Request { - outReq := new(http.Request) - *outReq = *req // includes shallow copies of maps, but we handle this below - - outReq.URL = utils.CopyURL(req.URL) - outReq.URL.Scheme = u.Scheme - outReq.URL.Host = u.Host - outReq.URL.Opaque = req.RequestURI - // raw query is already included in RequestURI, so ignore it to avoid dupes - outReq.URL.RawQuery = "" - // Do not pass client Host header unless optsetter PassHostHeader is set. - if f.passHost != true { - outReq.Host = u.Host - } - outReq.Proto = "HTTP/1.1" - outReq.ProtoMajor = 1 - outReq.ProtoMinor = 1 - - // Overwrite close flag so we can keep persistent connection for the backend servers - outReq.Close = false - - outReq.Header = make(http.Header) - utils.CopyHeaders(outReq.Header, req.Header) - - if f.rewriter != nil { - f.rewriter.Rewrite(outReq) - } - return outReq -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/forward/fwd_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/forward/fwd_test.go deleted file mode 100644 index 02b06f4a3..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/forward/fwd_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package forward - -import ( - "bytes" - "fmt" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "github.com/mailgun/oxy/testutils" - "github.com/mailgun/oxy/utils" - - . 
"gopkg.in/check.v1" -) - -func TestFwd(t *testing.T) { TestingT(t) } - -type FwdSuite struct{} - -var _ = Suite(&FwdSuite{}) - -// Makes sure hop-by-hop headers are removed -func (s *FwdSuite) TestForwardHopHeaders(c *C) { - called := false - var outHeaders http.Header - var outHost, expectedHost string - srv := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - called = true - outHeaders = req.Header - outHost = req.Host - w.Write([]byte("hello")) - }) - defer srv.Close() - - f, err := New() - c.Assert(err, IsNil) - - proxy := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - req.URL = testutils.ParseURI(srv.URL) - expectedHost = req.URL.Host - f.ServeHTTP(w, req) - }) - defer proxy.Close() - - headers := http.Header{ - Connection: []string{"close"}, - KeepAlive: []string{"timeout=600"}, - } - - re, body, err := testutils.Get(proxy.URL, testutils.Headers(headers)) - c.Assert(err, IsNil) - c.Assert(string(body), Equals, "hello") - c.Assert(re.StatusCode, Equals, http.StatusOK) - c.Assert(called, Equals, true) - c.Assert(outHeaders.Get(Connection), Equals, "") - c.Assert(outHeaders.Get(KeepAlive), Equals, "") - c.Assert(outHost, Equals, expectedHost) -} - -func (s *FwdSuite) TestDefaultErrHandler(c *C) { - f, err := New() - c.Assert(err, IsNil) - - proxy := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - req.URL = testutils.ParseURI("http://localhost:63450") - f.ServeHTTP(w, req) - }) - defer proxy.Close() - - re, _, err := testutils.Get(proxy.URL) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusBadGateway) -} - -func (s *FwdSuite) TestCustomErrHandler(c *C) { - f, err := New(ErrorHandler(utils.ErrorHandlerFunc(func(w http.ResponseWriter, req *http.Request, err error) { - w.WriteHeader(http.StatusTeapot) - w.Write([]byte(http.StatusText(http.StatusTeapot))) - }))) - c.Assert(err, IsNil) - - proxy := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - req.URL = testutils.ParseURI("http://localhost:63450") - f.ServeHTTP(w, req) - }) - defer proxy.Close() - - re, body, err := testutils.Get(proxy.URL) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusTeapot) - c.Assert(string(body), Equals, http.StatusText(http.StatusTeapot)) -} - -// Makes sure hop-by-hop headers are removed -func (s *FwdSuite) TestForwardedHeaders(c *C) { - var outHeaders http.Header - srv := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - outHeaders = req.Header - w.Write([]byte("hello")) - }) - defer srv.Close() - - f, err := New(Rewriter(&HeaderRewriter{TrustForwardHeader: true, Hostname: "hello"})) - c.Assert(err, IsNil) - - proxy := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - req.URL = testutils.ParseURI(srv.URL) - f.ServeHTTP(w, req) - }) - defer proxy.Close() - - headers := http.Header{ - XForwardedProto: []string{"httpx"}, - XForwardedFor: []string{"192.168.1.1"}, - XForwardedServer: []string{"foobar"}, - XForwardedHost: []string{"upstream-foobar"}, - } - - re, _, err := testutils.Get(proxy.URL, testutils.Headers(headers)) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusOK) - c.Assert(outHeaders.Get(XForwardedProto), Equals, "httpx") - c.Assert(strings.Contains(outHeaders.Get(XForwardedFor), "192.168.1.1"), Equals, true) - c.Assert(strings.Contains(outHeaders.Get(XForwardedHost), "upstream-foobar"), Equals, true) - c.Assert(outHeaders.Get(XForwardedServer), Equals, "hello") -} - -func (s *FwdSuite) TestCustomRewriter(c *C) { 
- var outHeaders http.Header - srv := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - outHeaders = req.Header - w.Write([]byte("hello")) - }) - defer srv.Close() - - f, err := New(Rewriter(&HeaderRewriter{TrustForwardHeader: false, Hostname: "hello"})) - c.Assert(err, IsNil) - - proxy := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - req.URL = testutils.ParseURI(srv.URL) - f.ServeHTTP(w, req) - }) - defer proxy.Close() - - headers := http.Header{ - XForwardedProto: []string{"httpx"}, - XForwardedFor: []string{"192.168.1.1"}, - } - - re, _, err := testutils.Get(proxy.URL, testutils.Headers(headers)) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusOK) - c.Assert(outHeaders.Get(XForwardedProto), Equals, "http") - c.Assert(strings.Contains(outHeaders.Get(XForwardedFor), "192.168.1.1"), Equals, false) -} - -func (s *FwdSuite) TestCustomTransportTimeout(c *C) { - srv := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - time.Sleep(20 * time.Millisecond) - w.Write([]byte("hello")) - }) - defer srv.Close() - - f, err := New(RoundTripper( - &http.Transport{ - ResponseHeaderTimeout: 5 * time.Millisecond, - })) - c.Assert(err, IsNil) - - proxy := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - req.URL = testutils.ParseURI(srv.URL) - f.ServeHTTP(w, req) - }) - defer proxy.Close() - - re, _, err := testutils.Get(proxy.URL) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusGatewayTimeout) -} - -func (s *FwdSuite) TestCustomLogger(c *C) { - srv := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte("hello")) - }) - defer srv.Close() - - buf := &bytes.Buffer{} - l := utils.NewFileLogger(buf, utils.INFO) - - f, err := New(Logger(l)) - c.Assert(err, IsNil) - - proxy := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - req.URL = testutils.ParseURI(srv.URL) - f.ServeHTTP(w, req) - }) - defer proxy.Close() - - re, _, err := testutils.Get(proxy.URL) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusOK) - c.Assert(strings.Contains(buf.String(), srv.URL), Equals, true) -} - -func (s *FwdSuite) TestEscapedURL(c *C) { - var outURL string - srv := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - outURL = req.RequestURI - w.Write([]byte("hello")) - }) - defer srv.Close() - - f, err := New() - c.Assert(err, IsNil) - - proxy := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - req.URL = testutils.ParseURI(srv.URL) - f.ServeHTTP(w, req) - }) - defer proxy.Close() - - path := "/log/http%3A%2F%2Fwww.site.com%2Fsomething?a=b" - - request, err := http.NewRequest("GET", proxy.URL, nil) - parsed := testutils.ParseURI(proxy.URL) - parsed.Opaque = path - request.URL = parsed - re, err := http.DefaultClient.Do(request) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusOK) - c.Assert(outURL, Equals, path) -} - -func (s *FwdSuite) TestForwardedProto(c *C) { - var proto string - srv := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - proto = req.Header.Get(XForwardedProto) - w.Write([]byte("hello")) - }) - defer srv.Close() - - buf := &bytes.Buffer{} - l := utils.NewFileLogger(buf, utils.INFO) - - f, err := New(Logger(l)) - c.Assert(err, IsNil) - - proxy := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - req.URL = testutils.ParseURI(srv.URL) - f.ServeHTTP(w, req) - }) - tproxy := httptest.NewUnstartedServer(proxy) - 
tproxy.StartTLS() - defer tproxy.Close() - - re, _, err := testutils.Get(tproxy.URL) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusOK) - c.Assert(proto, Equals, "https") - - c.Assert(strings.Contains(buf.String(), "tls"), Equals, true) -} - -func (s *FwdSuite) TestChunkedResponseConversion(c *C) { - srv := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - h := w.(http.Hijacker) - conn, _, _ := h.Hijack() - fmt.Fprintf(conn, "HTTP/1.0 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n4\r\ntest\r\n5\r\ntest1\r\n5\r\ntest2\r\n0\r\n\r\n") - conn.Close() - }) - defer srv.Close() - - f, err := New() - c.Assert(err, IsNil) - - proxy := testutils.NewHandler(func(w http.ResponseWriter, req *http.Request) { - req.URL = testutils.ParseURI(srv.URL) - f.ServeHTTP(w, req) - }) - defer proxy.Close() - - re, body, err := testutils.Get(proxy.URL) - c.Assert(err, IsNil) - c.Assert(string(body), Equals, "testtest1test2") - c.Assert(re.StatusCode, Equals, http.StatusOK) - c.Assert(re.Header.Get("Content-Length"), Equals, fmt.Sprintf("%d", len("testtest1test2"))) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/forward/headers.go b/Godeps/_workspace/src/github.com/mailgun/oxy/forward/headers.go deleted file mode 100644 index 0702ed8fc..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/forward/headers.go +++ /dev/null @@ -1,31 +0,0 @@ -package forward - -const ( - XForwardedProto = "X-Forwarded-Proto" - XForwardedFor = "X-Forwarded-For" - XForwardedHost = "X-Forwarded-Host" - XForwardedServer = "X-Forwarded-Server" - Connection = "Connection" - KeepAlive = "Keep-Alive" - ProxyAuthenticate = "Proxy-Authenticate" - ProxyAuthorization = "Proxy-Authorization" - Te = "Te" // canonicalized version of "TE" - Trailers = "Trailers" - TransferEncoding = "Transfer-Encoding" - Upgrade = "Upgrade" - ContentLength = "Content-Length" -) - -// Hop-by-hop headers. These are removed when sent to the backend. 
-// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html -// Copied from reverseproxy.go, too bad -var HopHeaders = []string{ - Connection, - KeepAlive, - ProxyAuthenticate, - ProxyAuthorization, - Te, // canonicalized version of "TE" - Trailers, - TransferEncoding, - Upgrade, -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/forward/rewrite.go b/Godeps/_workspace/src/github.com/mailgun/oxy/forward/rewrite.go deleted file mode 100644 index 4f3ac8b49..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/forward/rewrite.go +++ /dev/null @@ -1,48 +0,0 @@ -package forward - -import ( - "net" - "net/http" - "strings" - - "github.com/mailgun/oxy/utils" -) - -// Rewriter is responsible for removing hop-by-hop headers and setting forwarding headers -type HeaderRewriter struct { - TrustForwardHeader bool - Hostname string -} - -func (rw *HeaderRewriter) Rewrite(req *http.Request) { - if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { - if rw.TrustForwardHeader { - if prior, ok := req.Header[XForwardedFor]; ok { - clientIP = strings.Join(prior, ", ") + ", " + clientIP - } - } - req.Header.Set(XForwardedFor, clientIP) - } - - if xfp := req.Header.Get(XForwardedProto); xfp != "" && rw.TrustForwardHeader { - req.Header.Set(XForwardedProto, xfp) - } else if req.TLS != nil { - req.Header.Set(XForwardedProto, "https") - } else { - req.Header.Set(XForwardedProto, "http") - } - - if xfh := req.Header.Get(XForwardedHost); xfh != "" && rw.TrustForwardHeader { - req.Header.Set(XForwardedHost, xfh) - } else if req.Host != "" { - req.Header.Set(XForwardedHost, req.Host) - } - - if rw.Hostname != "" { - req.Header.Set(XForwardedServer, rw.Hostname) - } - - // Remove hop-by-hop headers to the backend. Especially important is "Connection" because we want a persistent - // connection, regardless of what the client sent to us. - utils.RemoveHeaders(req.Header, HopHeaders...) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/anomaly.go b/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/anomaly.go deleted file mode 100644 index 5fa068911..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/anomaly.go +++ /dev/null @@ -1,99 +0,0 @@ -package memmetrics - -import ( - "math" - "sort" - "time" -) - -// SplitRatios provides simple anomaly detection for requests latencies. -// it splits values into good or bad category based on the threshold and the median value. -// If all values are not far from the median, it will return all values in 'good' set. -// Precision is the smallest value to consider, e.g. if set to millisecond, microseconds will be ignored. -func SplitLatencies(values []time.Duration, precision time.Duration) (good map[time.Duration]bool, bad map[time.Duration]bool) { - // Find the max latency M and then map each latency L to the ratio L/M and then call SplitFloat64 - v2r := map[float64]time.Duration{} - ratios := make([]float64, len(values)) - m := maxTime(values) - for i, v := range values { - ratio := float64(v/precision+1) / float64(m/precision+1) // +1 is to avoid division by 0 - v2r[ratio] = v - ratios[i] = ratio - } - good, bad = make(map[time.Duration]bool), make(map[time.Duration]bool) - // Note that multiplier makes this function way less sensitive than ratios detector, this is to avoid noise. 
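// A small stand-alone illustration (not the vendored code) of the
// X-Forwarded-For behaviour implemented by HeaderRewriter above: when the
// forward headers are trusted, the client IP is appended to whatever chain the
// request already carried; otherwise the chain is replaced with just the
// client IP. Addresses below are hypothetical.
package main

import (
	"fmt"
	"net"
	"net/http"
	"strings"
)

// appendForwardedFor reproduces the trust-dependent append logic as a sketch.
func appendForwardedFor(req *http.Request, trust bool) {
	clientIP, _, err := net.SplitHostPort(req.RemoteAddr)
	if err != nil {
		return
	}
	if prior, ok := req.Header["X-Forwarded-For"]; ok && trust {
		clientIP = strings.Join(prior, ", ") + ", " + clientIP
	}
	req.Header.Set("X-Forwarded-For", clientIP)
}

func main() {
	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	req.RemoteAddr = "10.0.0.2:54321"
	req.Header.Set("X-Forwarded-For", "192.168.1.1")

	appendForwardedFor(req, true)
	fmt.Println(req.Header.Get("X-Forwarded-For")) // "192.168.1.1, 10.0.0.2"
}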
- vgood, vbad := SplitFloat64(2, 0, ratios) - for r, _ := range vgood { - good[v2r[r]] = true - } - for r, _ := range vbad { - bad[v2r[r]] = true - } - return good, bad -} - -// SplitRatios provides simple anomaly detection for ratio values, that are all in the range [0, 1] -// it splits values into good or bad category based on the threshold and the median value. -// If all values are not far from the median, it will return all values in 'good' set. -func SplitRatios(values []float64) (good map[float64]bool, bad map[float64]bool) { - return SplitFloat64(1.5, 0, values) -} - -// SplitFloat64 provides simple anomaly detection for skewed data sets with no particular distribution. -// In essense it applies the formula if(v > median(values) + threshold * medianAbsoluteDeviation) -> anomaly -// There's a corner case where there are just 2 values, so by definition there's no value that exceeds the threshold. -// This case is solved by introducing additional value that we know is good, e.g. 0. That helps to improve the detection results -// on such data sets. -func SplitFloat64(threshold, sentinel float64, values []float64) (good map[float64]bool, bad map[float64]bool) { - good, bad = make(map[float64]bool), make(map[float64]bool) - var newValues []float64 - if len(values)%2 == 0 { - newValues = make([]float64, len(values)+1) - copy(newValues, values) - // Add a sentinel endpoint so we can distinguish outliers better - newValues[len(newValues)-1] = sentinel - } else { - newValues = values - } - - m := median(newValues) - mAbs := medianAbsoluteDeviation(newValues) - for _, v := range values { - if v > (m+mAbs)*threshold { - bad[v] = true - } else { - good[v] = true - } - } - return good, bad -} - -func median(values []float64) float64 { - vals := make([]float64, len(values)) - copy(vals, values) - sort.Float64s(vals) - l := len(vals) - if l%2 != 0 { - return vals[l/2] - } - return (vals[l/2-1] + vals[l/2]) / 2.0 -} - -func medianAbsoluteDeviation(values []float64) float64 { - m := median(values) - distances := make([]float64, len(values)) - for i, v := range values { - distances[i] = math.Abs(v - m) - } - return median(distances) -} - -func maxTime(vals []time.Duration) time.Duration { - val := vals[0] - for _, v := range vals { - if v > val { - val = v - } - } - return val -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/anomaly_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/anomaly_test.go deleted file mode 100644 index 2b622e6a7..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/anomaly_test.go +++ /dev/null @@ -1,158 +0,0 @@ -package memmetrics - -import ( - "time" - - . 
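// A brief usage example of the outlier split implemented above, reusing one of
// the cases from anomaly_test.go below. With the 1.5 threshold that SplitRatios
// uses, the value 1 exceeds (median + MAD) * threshold = (0.012 + 0.002) * 1.5
// = 0.021 and lands in the bad set, while the two small ratios stay good.
package main

import (
	"fmt"

	"github.com/mailgun/oxy/memmetrics"
)

func main() {
	good, bad := memmetrics.SplitRatios([]float64{0.012, 0.01, 1})
	fmt.Println(good) // map[0.01:true 0.012:true]
	fmt.Println(bad)  // map[1:true]
}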
"gopkg.in/check.v1" -) - -type AnomalySuite struct { -} - -var _ = Suite(&AnomalySuite{}) - -func (s *AnomalySuite) TestMedian(c *C) { - c.Assert(median([]float64{0.1, 0.2}), Equals, (float64(0.1)+float64(0.2))/2.0) - c.Assert(median([]float64{0.3, 0.2, 0.5}), Equals, 0.3) -} - -func (s *AnomalySuite) TestSplitRatios(c *C) { - vals := []struct { - values []float64 - good []float64 - bad []float64 - }{ - { - values: []float64{0, 0}, - good: []float64{0}, - bad: []float64{}, - }, - - { - values: []float64{0, 1}, - good: []float64{0}, - bad: []float64{1}, - }, - { - values: []float64{0.1, 0.1}, - good: []float64{0.1}, - bad: []float64{}, - }, - - { - values: []float64{0.15, 0.1}, - good: []float64{0.15, 0.1}, - bad: []float64{}, - }, - { - values: []float64{0.01, 0.01}, - good: []float64{0.01}, - bad: []float64{}, - }, - { - values: []float64{0.012, 0.01, 1}, - good: []float64{0.012, 0.01}, - bad: []float64{1}, - }, - { - values: []float64{0, 0, 1, 1}, - good: []float64{0}, - bad: []float64{1}, - }, - { - values: []float64{0, 0.1, 0.1, 0}, - good: []float64{0}, - bad: []float64{0.1}, - }, - { - values: []float64{0, 0.01, 0.1, 0}, - good: []float64{0}, - bad: []float64{0.01, 0.1}, - }, - { - values: []float64{0, 0.01, 0.02, 1}, - good: []float64{0, 0.01, 0.02}, - bad: []float64{1}, - }, - { - values: []float64{0, 0, 0, 0, 0, 0.01, 0.02, 1}, - good: []float64{0}, - bad: []float64{0.01, 0.02, 1}, - }, - } - for _, v := range vals { - good, bad := SplitRatios(v.values) - vgood, vbad := make(map[float64]bool, len(v.good)), make(map[float64]bool, len(v.bad)) - for _, v := range v.good { - vgood[v] = true - } - for _, v := range v.bad { - vbad[v] = true - } - - c.Assert(good, DeepEquals, vgood) - c.Assert(bad, DeepEquals, vbad) - } -} - -func (s *AnomalySuite) TestSplitLatencies(c *C) { - vals := []struct { - values []int - good []int - bad []int - }{ - { - values: []int{0, 0}, - good: []int{0}, - bad: []int{}, - }, - { - values: []int{1, 2}, - good: []int{1, 2}, - bad: []int{}, - }, - { - values: []int{1, 2, 4}, - good: []int{1, 2, 4}, - bad: []int{}, - }, - { - values: []int{8, 8, 18}, - good: []int{8}, - bad: []int{18}, - }, - { - values: []int{32, 28, 11, 26, 19, 51, 25, 39, 28, 26, 8, 97}, - good: []int{32, 28, 11, 26, 19, 51, 25, 39, 28, 26, 8}, - bad: []int{97}, - }, - { - values: []int{1, 2, 4, 40}, - good: []int{1, 2, 4}, - bad: []int{40}, - }, - { - values: []int{40, 60, 1000}, - good: []int{40, 60}, - bad: []int{1000}, - }, - } - for _, v := range vals { - vvalues := make([]time.Duration, len(v.values)) - for i, d := range v.values { - vvalues[i] = time.Millisecond * time.Duration(d) - } - good, bad := SplitLatencies(vvalues, time.Millisecond) - - vgood, vbad := make(map[time.Duration]bool, len(v.good)), make(map[time.Duration]bool, len(v.bad)) - for _, v := range v.good { - vgood[time.Duration(v)*time.Millisecond] = true - } - for _, v := range v.bad { - vbad[time.Duration(v)*time.Millisecond] = true - } - - c.Assert(good, DeepEquals, vgood) - c.Assert(bad, DeepEquals, vbad) - } -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/counter.go b/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/counter.go deleted file mode 100644 index 71e0a7951..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/counter.go +++ /dev/null @@ -1,155 +0,0 @@ -package memmetrics - -import ( - "fmt" - "time" - - "github.com/mailgun/timetools" -) - -type rcOptSetter func(*RollingCounter) error - -func CounterClock(c timetools.TimeProvider) rcOptSetter { - return 
func(r *RollingCounter) error { - r.clock = c - return nil - } -} - -// Calculates in memory failure rate of an endpoint using rolling window of a predefined size -type RollingCounter struct { - clock timetools.TimeProvider - resolution time.Duration - values []int - countedBuckets int // how many samples in different buckets have we collected so far - lastBucket int // last recorded bucket - lastUpdated time.Time -} - -// NewCounter creates a counter with fixed amount of buckets that are rotated every resolution period. -// E.g. 10 buckets with 1 second means that every new second the bucket is refreshed, so it maintains 10 second rolling window. -// By default creates a bucket with 10 buckets and 1 second resolution -func NewCounter(buckets int, resolution time.Duration, options ...rcOptSetter) (*RollingCounter, error) { - if buckets <= 0 { - return nil, fmt.Errorf("Buckets should be >= 0") - } - if resolution < time.Second { - return nil, fmt.Errorf("Resolution should be larger than a second") - } - - rc := &RollingCounter{ - lastBucket: -1, - resolution: resolution, - - values: make([]int, buckets), - } - - for _, o := range options { - if err := o(rc); err != nil { - return nil, err - } - } - - if rc.clock == nil { - rc.clock = &timetools.RealTime{} - } - - return rc, nil -} - -func (c *RollingCounter) Append(o *RollingCounter) error { - c.Inc(int(o.Count())) - return nil -} - -func (c *RollingCounter) Clone() *RollingCounter { - c.cleanup() - other := &RollingCounter{ - resolution: c.resolution, - values: make([]int, len(c.values)), - clock: c.clock, - lastBucket: c.lastBucket, - lastUpdated: c.lastUpdated, - } - for i, v := range c.values { - other.values[i] = v - } - return other -} - -func (c *RollingCounter) Reset() { - c.lastBucket = -1 - c.countedBuckets = 0 - c.lastUpdated = time.Time{} - for i := range c.values { - c.values[i] = 0 - } -} - -func (c *RollingCounter) CountedBuckets() int { - return c.countedBuckets -} - -func (c *RollingCounter) Count() int64 { - c.cleanup() - return c.sum() -} - -func (c *RollingCounter) Resolution() time.Duration { - return c.resolution -} - -func (c *RollingCounter) Buckets() int { - return len(c.values) -} - -func (c *RollingCounter) WindowSize() time.Duration { - return time.Duration(len(c.values)) * c.resolution -} - -func (c *RollingCounter) Inc(v int) { - c.cleanup() - c.incBucketValue(v) -} - -func (c *RollingCounter) incBucketValue(v int) { - now := c.clock.UtcNow() - bucket := c.getBucket(now) - c.values[bucket] += v - c.lastUpdated = now - // Update usage stats if we haven't collected enough data - if c.countedBuckets < len(c.values) { - // Only update if we have advanced to the next bucket and not incremented the value - // in the current bucket. 
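// A hedged usage sketch of the rolling counter above. With 3 buckets at 1 second
// resolution the counter keeps a 3 second sliding window; increments older than
// the window are dropped. The frozen clock mirrors the one used in
// counter_test.go, and the expected count matches TestCloneExpired.
package main

import (
	"fmt"
	"time"

	"github.com/mailgun/oxy/memmetrics"
	"github.com/mailgun/timetools"
)

func main() {
	clock := &timetools.FreezedTime{
		CurrentTime: time.Date(2012, 3, 4, 5, 6, 7, 0, time.UTC),
	}
	cnt, err := memmetrics.NewCounter(3, time.Second, memmetrics.CounterClock(clock))
	if err != nil {
		panic(err)
	}

	// One hit per second for three seconds, then let one more second pass:
	// the oldest bucket falls out of the window, so only two hits remain.
	for i := 0; i < 3; i++ {
		cnt.Inc(1)
		clock.Sleep(time.Second)
	}
	fmt.Println(cnt.Count()) // 2, as in TestCloneExpired
}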
- if c.lastBucket != bucket { - c.lastBucket = bucket - c.countedBuckets++ - } - } -} - -// Returns the number in the moving window bucket that this slot occupies -func (c *RollingCounter) getBucket(t time.Time) int { - return int(t.Truncate(c.resolution).Unix() % int64(len(c.values))) -} - -// Reset buckets that were not updated -func (c *RollingCounter) cleanup() { - now := c.clock.UtcNow() - for i := 0; i < len(c.values); i++ { - now = now.Add(time.Duration(-1*i) * c.resolution) - if now.Truncate(c.resolution).After(c.lastUpdated.Truncate(c.resolution)) { - c.values[c.getBucket(now)] = 0 - } else { - break - } - } -} - -func (c *RollingCounter) sum() int64 { - out := int64(0) - for _, v := range c.values { - out += int64(v) - } - return out -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/counter_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/counter_test.go deleted file mode 100644 index cf546d2ee..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/counter_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package memmetrics - -import ( - "time" - - "github.com/mailgun/timetools" - . "gopkg.in/check.v1" -) - -type CounterSuite struct { - clock *timetools.FreezedTime -} - -var _ = Suite(&CounterSuite{}) - -func (s *CounterSuite) SetUpSuite(c *C) { - s.clock = &timetools.FreezedTime{ - CurrentTime: time.Date(2012, 3, 4, 5, 6, 7, 0, time.UTC), - } -} - -func (s *CounterSuite) TestCloneExpired(c *C) { - cnt, err := NewCounter(3, time.Second, CounterClock(s.clock)) - c.Assert(err, IsNil) - cnt.Inc(1) - s.clock.Sleep(time.Second) - cnt.Inc(1) - s.clock.Sleep(time.Second) - cnt.Inc(1) - s.clock.Sleep(time.Second) - - out := cnt.Clone() - c.Assert(out.Count(), Equals, int64(2)) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/histogram.go b/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/histogram.go deleted file mode 100644 index 21db94ce1..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/histogram.go +++ /dev/null @@ -1,174 +0,0 @@ -package memmetrics - -import ( - "fmt" - "time" - - "github.com/codahale/hdrhistogram" - "github.com/mailgun/timetools" -) - -// HDRHistogram is a tiny wrapper around github.com/codahale/hdrhistogram that provides convenience functions for measuring http latencies -type HDRHistogram struct { - // lowest trackable value - low int64 - // highest trackable value - high int64 - // significant figures - sigfigs int - - h *hdrhistogram.Histogram -} - -func NewHDRHistogram(low, high int64, sigfigs int) (h *HDRHistogram, err error) { - defer func() { - if msg := recover(); msg != nil { - err = fmt.Errorf("%s", msg) - } - }() - return &HDRHistogram{ - low: low, - high: high, - sigfigs: sigfigs, - h: hdrhistogram.New(low, high, sigfigs), - }, nil -} - -// Returns latency at quantile with microsecond precision -func (h *HDRHistogram) LatencyAtQuantile(q float64) time.Duration { - return time.Duration(h.ValueAtQuantile(q)) * time.Microsecond -} - -// Records latencies with microsecond precision -func (h *HDRHistogram) RecordLatencies(d time.Duration, n int64) error { - return h.RecordValues(int64(d/time.Microsecond), n) -} - -func (h *HDRHistogram) Reset() { - h.h.Reset() -} - -func (h *HDRHistogram) ValueAtQuantile(q float64) int64 { - return h.h.ValueAtQuantile(q) -} - -func (h *HDRHistogram) RecordValues(v, n int64) error { - return h.h.RecordValues(v, n) -} - -func (h *HDRHistogram) Merge(other *HDRHistogram) error { - if other == nil { - return fmt.Errorf("other is 
nil") - } - h.h.Merge(other.h) - return nil -} - -type rhOptSetter func(r *RollingHDRHistogram) error - -func RollingClock(clock timetools.TimeProvider) rhOptSetter { - return func(r *RollingHDRHistogram) error { - r.clock = clock - return nil - } -} - -// RollingHistogram holds multiple histograms and rotates every period. -// It provides resulting histogram as a result of a call of 'Merged' function. -type RollingHDRHistogram struct { - idx int - lastRoll time.Time - period time.Duration - bucketCount int - low int64 - high int64 - sigfigs int - buckets []*HDRHistogram - clock timetools.TimeProvider -} - -func NewRollingHDRHistogram(low, high int64, sigfigs int, period time.Duration, bucketCount int, options ...rhOptSetter) (*RollingHDRHistogram, error) { - rh := &RollingHDRHistogram{ - bucketCount: bucketCount, - period: period, - low: low, - high: high, - sigfigs: sigfigs, - } - - for _, o := range options { - if err := o(rh); err != nil { - return nil, err - } - } - - if rh.clock == nil { - rh.clock = &timetools.RealTime{} - } - - buckets := make([]*HDRHistogram, rh.bucketCount) - for i := range buckets { - h, err := NewHDRHistogram(low, high, sigfigs) - if err != nil { - return nil, err - } - buckets[i] = h - } - rh.buckets = buckets - return rh, nil -} - -func (r *RollingHDRHistogram) Append(o *RollingHDRHistogram) error { - if r.bucketCount != o.bucketCount || r.period != o.period || r.low != o.low || r.high != o.high || r.sigfigs != o.sigfigs { - return fmt.Errorf("can't merge") - } - - for i := range r.buckets { - if err := r.buckets[i].Merge(o.buckets[i]); err != nil { - return err - } - } - return nil -} - -func (r *RollingHDRHistogram) Reset() { - r.idx = 0 - r.lastRoll = r.clock.UtcNow() - for _, b := range r.buckets { - b.Reset() - } -} - -func (r *RollingHDRHistogram) rotate() { - r.idx = (r.idx + 1) % len(r.buckets) - r.buckets[r.idx].Reset() -} - -func (r *RollingHDRHistogram) Merged() (*HDRHistogram, error) { - m, err := NewHDRHistogram(r.low, r.high, r.sigfigs) - if err != nil { - return m, err - } - for _, h := range r.buckets { - if m.Merge(h); err != nil { - return nil, err - } - } - return m, nil -} - -func (r *RollingHDRHistogram) getHist() *HDRHistogram { - if r.clock.UtcNow().Sub(r.lastRoll) >= r.period { - r.rotate() - r.lastRoll = r.clock.UtcNow() - } - return r.buckets[r.idx] -} - -func (r *RollingHDRHistogram) RecordLatencies(v time.Duration, n int64) error { - return r.getHist().RecordLatencies(v, n) -} - -func (r *RollingHDRHistogram) RecordValues(v, n int64) error { - return r.getHist().RecordValues(v, n) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/histogram_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/histogram_test.go deleted file mode 100644 index 8eacfdf9b..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/histogram_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package memmetrics - -import ( - "time" - - "github.com/mailgun/timetools" - . 
"gopkg.in/check.v1" -) - -type HistogramSuite struct { - tm *timetools.FreezedTime -} - -var _ = Suite(&HistogramSuite{}) - -func (s *HistogramSuite) SetUpSuite(c *C) { - s.tm = &timetools.FreezedTime{ - CurrentTime: time.Date(2012, 3, 4, 5, 6, 7, 0, time.UTC), - } -} - -func (s *HistogramSuite) TestMerge(c *C) { - a, err := NewHDRHistogram(1, 3600000, 2) - c.Assert(err, IsNil) - - a.RecordValues(1, 2) - - b, err := NewHDRHistogram(1, 3600000, 2) - c.Assert(err, IsNil) - - b.RecordValues(2, 1) - - c.Assert(a.Merge(b), IsNil) - - c.Assert(a.ValueAtQuantile(50), Equals, int64(1)) - c.Assert(a.ValueAtQuantile(100), Equals, int64(2)) -} - -func (s *HistogramSuite) TestInvalidParams(c *C) { - _, err := NewHDRHistogram(1, 3600000, 0) - c.Assert(err, NotNil) -} - -func (s *HistogramSuite) TestMergeNil(c *C) { - a, err := NewHDRHistogram(1, 3600000, 1) - c.Assert(err, IsNil) - - c.Assert(a.Merge(nil), NotNil) -} - -func (s *HistogramSuite) TestRotation(c *C) { - h, err := NewRollingHDRHistogram( - 1, // min value - 3600000, // max value - 3, // significant figurwes - time.Second, // 1 second is a rolling period - 2, // 2 histograms in a window - RollingClock(s.tm)) - - c.Assert(err, IsNil) - c.Assert(h, NotNil) - - h.RecordValues(5, 1) - - m, err := h.Merged() - c.Assert(err, IsNil) - c.Assert(m.ValueAtQuantile(100), Equals, int64(5)) - - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - h.RecordValues(2, 1) - h.RecordValues(1, 1) - - m, err = h.Merged() - c.Assert(err, IsNil) - c.Assert(m.ValueAtQuantile(100), Equals, int64(5)) - - // rotate, this means that the old value would evaporate - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - h.RecordValues(1, 1) - m, err = h.Merged() - c.Assert(err, IsNil) - c.Assert(m.ValueAtQuantile(100), Equals, int64(2)) -} - -func (s *HistogramSuite) TestReset(c *C) { - h, err := NewRollingHDRHistogram( - 1, // min value - 3600000, // max value - 3, // significant figurwes - time.Second, // 1 second is a rolling period - 2, // 2 histograms in a window - RollingClock(s.tm)) - - c.Assert(err, IsNil) - c.Assert(h, NotNil) - - h.RecordValues(5, 1) - - m, err := h.Merged() - c.Assert(err, IsNil) - c.Assert(m.ValueAtQuantile(100), Equals, int64(5)) - - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - h.RecordValues(2, 1) - h.RecordValues(1, 1) - - m, err = h.Merged() - c.Assert(err, IsNil) - c.Assert(m.ValueAtQuantile(100), Equals, int64(5)) - - h.Reset() - - h.RecordValues(5, 1) - - m, err = h.Merged() - c.Assert(err, IsNil) - c.Assert(m.ValueAtQuantile(100), Equals, int64(5)) - - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - h.RecordValues(2, 1) - h.RecordValues(1, 1) - - m, err = h.Merged() - c.Assert(err, IsNil) - c.Assert(m.ValueAtQuantile(100), Equals, int64(5)) - -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/ratio.go b/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/ratio.go deleted file mode 100644 index f21f375ea..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/ratio.go +++ /dev/null @@ -1,120 +0,0 @@ -package memmetrics - -import ( - "time" - - "github.com/mailgun/timetools" -) - -type ratioOptSetter func(r *RatioCounter) error - -func RatioClock(clock timetools.TimeProvider) ratioOptSetter { - return func(r *RatioCounter) error { - r.clock = clock - return nil - } -} - -// RatioCounter calculates a ratio of a/a+b over a rolling window of predefined buckets -type RatioCounter struct { - clock timetools.TimeProvider - a *RollingCounter - b *RollingCounter -} - -func 
NewRatioCounter(buckets int, resolution time.Duration, options ...ratioOptSetter) (*RatioCounter, error) { - rc := &RatioCounter{} - - for _, o := range options { - if err := o(rc); err != nil { - return nil, err - } - } - - if rc.clock == nil { - rc.clock = &timetools.RealTime{} - } - - a, err := NewCounter(buckets, resolution, CounterClock(rc.clock)) - if err != nil { - return nil, err - } - - b, err := NewCounter(buckets, resolution, CounterClock(rc.clock)) - if err != nil { - return nil, err - } - - rc.a = a - rc.b = b - return rc, nil -} - -func (r *RatioCounter) Reset() { - r.a.Reset() - r.b.Reset() -} - -func (r *RatioCounter) IsReady() bool { - return r.a.countedBuckets+r.b.countedBuckets >= len(r.a.values) -} - -func (r *RatioCounter) CountA() int64 { - return r.a.Count() -} - -func (r *RatioCounter) CountB() int64 { - return r.b.Count() -} - -func (r *RatioCounter) Resolution() time.Duration { - return r.a.Resolution() -} - -func (r *RatioCounter) Buckets() int { - return r.a.Buckets() -} - -func (r *RatioCounter) WindowSize() time.Duration { - return r.a.WindowSize() -} - -func (r *RatioCounter) ProcessedCount() int64 { - return r.CountA() + r.CountB() -} - -func (r *RatioCounter) Ratio() float64 { - a := r.a.Count() - b := r.b.Count() - // No data, return ok - if a+b == 0 { - return 0 - } - return float64(a) / float64(a+b) -} - -func (r *RatioCounter) IncA(v int) { - r.a.Inc(v) -} - -func (r *RatioCounter) IncB(v int) { - r.b.Inc(v) -} - -type TestMeter struct { - Rate float64 - NotReady bool - WindowSize time.Duration -} - -func (tm *TestMeter) GetWindowSize() time.Duration { - return tm.WindowSize -} - -func (tm *TestMeter) IsReady() bool { - return !tm.NotReady -} - -func (tm *TestMeter) GetRate() float64 { - return tm.Rate -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/ratio_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/ratio_test.go deleted file mode 100644 index 68359ec18..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/ratio_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package memmetrics - -import ( - "testing" - "time" - - "github.com/mailgun/timetools" - . 
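// A hedged usage sketch of the ratio counter above, following the
// TestMultipleBuckets case below: with A counting failures and B counting
// successes, Ratio() returns a/(a+b) over the rolling window, and IsReady()
// only reports true once every bucket in the window has been touched.
package main

import (
	"fmt"
	"time"

	"github.com/mailgun/oxy/memmetrics"
	"github.com/mailgun/timetools"
)

func main() {
	clock := &timetools.FreezedTime{
		CurrentTime: time.Date(2012, 3, 4, 5, 6, 7, 0, time.UTC),
	}
	fr, err := memmetrics.NewRatioCounter(3, time.Second, memmetrics.RatioClock(clock))
	if err != nil {
		panic(err)
	}

	fr.IncB(1) // one success
	clock.Sleep(time.Second)
	fr.IncA(1) // one failure
	clock.Sleep(time.Second)
	fr.IncA(1) // another failure

	fmt.Println(fr.IsReady()) // true: all three buckets have data
	fmt.Println(fr.Ratio())   // 2/3
}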
"gopkg.in/check.v1" -) - -func TestFailrate(t *testing.T) { TestingT(t) } - -type FailRateSuite struct { - tm *timetools.FreezedTime -} - -var _ = Suite(&FailRateSuite{}) - -func (s *FailRateSuite) SetUpSuite(c *C) { - s.tm = &timetools.FreezedTime{ - CurrentTime: time.Date(2012, 3, 4, 5, 6, 7, 0, time.UTC), - } -} - -func (s *FailRateSuite) TestInvalidParams(c *C) { - // Bad buckets count - _, err := NewRatioCounter(0, time.Second, RatioClock(s.tm)) - c.Assert(err, Not(IsNil)) - - // Too precise resolution - _, err = NewRatioCounter(10, time.Millisecond, RatioClock(s.tm)) - c.Assert(err, Not(IsNil)) -} - -func (s *FailRateSuite) TestNotReady(c *C) { - // No data - fr, err := NewRatioCounter(10, time.Second, RatioClock(s.tm)) - c.Assert(err, IsNil) - c.Assert(fr.IsReady(), Equals, false) - c.Assert(fr.Ratio(), Equals, 0.0) - - // Not enough data - fr, err = NewRatioCounter(10, time.Second, RatioClock(s.tm)) - c.Assert(err, IsNil) - fr.CountA() - c.Assert(fr.IsReady(), Equals, false) -} - -func (s *FailRateSuite) TestNoB(c *C) { - fr, err := NewRatioCounter(1, time.Second, RatioClock(s.tm)) - c.Assert(err, IsNil) - fr.IncA(1) - c.Assert(fr.IsReady(), Equals, true) - c.Assert(fr.Ratio(), Equals, 1.0) -} - -func (s *FailRateSuite) TestNoA(c *C) { - fr, err := NewRatioCounter(1, time.Second, RatioClock(s.tm)) - c.Assert(err, IsNil) - fr.IncB(1) - c.Assert(fr.IsReady(), Equals, true) - c.Assert(fr.Ratio(), Equals, 0.0) -} - -// Make sure that data is properly calculated over several buckets -func (s *FailRateSuite) TestMultipleBuckets(c *C) { - fr, err := NewRatioCounter(3, time.Second, RatioClock(s.tm)) - c.Assert(err, IsNil) - - fr.IncB(1) - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - fr.IncA(1) - - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - fr.IncA(1) - - c.Assert(fr.IsReady(), Equals, true) - c.Assert(fr.Ratio(), Equals, float64(2)/float64(3)) -} - -// Make sure that data is properly calculated over several buckets -// When we overwrite old data when the window is rolling -func (s *FailRateSuite) TestOverwriteBuckets(c *C) { - fr, err := NewRatioCounter(3, time.Second, RatioClock(s.tm)) - c.Assert(err, IsNil) - - fr.IncB(1) - - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - fr.IncA(1) - - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - fr.IncA(1) - - // This time we should overwrite the old data points - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - fr.IncA(1) - fr.IncB(2) - - c.Assert(fr.IsReady(), Equals, true) - c.Assert(fr.Ratio(), Equals, float64(3)/float64(5)) -} - -// Make sure we cleanup the data after periods of inactivity -// So it does not mess up the stats -func (s *FailRateSuite) TestInactiveBuckets(c *C) { - - fr, err := NewRatioCounter(3, time.Second, RatioClock(s.tm)) - c.Assert(err, IsNil) - - fr.IncB(1) - - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - fr.IncA(1) - - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - fr.IncA(1) - - // This time we should overwrite the old data points with new data - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - fr.IncA(1) - fr.IncB(2) - - // Jump to the last bucket and change the data - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second * 2) - fr.IncB(1) - - c.Assert(fr.IsReady(), Equals, true) - c.Assert(fr.Ratio(), Equals, float64(1)/float64(4)) -} - -func (s *FailRateSuite) TestLongPeriodsOfInactivity(c *C) { - fr, err := NewRatioCounter(2, time.Second, RatioClock(s.tm)) - c.Assert(err, IsNil) - - fr.IncB(1) - - s.tm.CurrentTime = s.tm.CurrentTime.Add(time.Second) - 
fr.IncA(1) - - c.Assert(fr.IsReady(), Equals, true) - c.Assert(fr.Ratio(), Equals, 0.5) - - // This time we should overwrite all data points - s.tm.CurrentTime = s.tm.CurrentTime.Add(100 * time.Second) - fr.IncA(1) - c.Assert(fr.Ratio(), Equals, 1.0) -} - -func (s *FailRateSuite) TestReset(c *C) { - fr, err := NewRatioCounter(1, time.Second, RatioClock(s.tm)) - c.Assert(err, IsNil) - - fr.IncB(1) - fr.IncA(1) - - c.Assert(fr.IsReady(), Equals, true) - c.Assert(fr.Ratio(), Equals, 0.5) - - // Reset the counter - fr.Reset() - c.Assert(fr.IsReady(), Equals, false) - - // Now add some stats - fr.IncA(2) - - // We are game again! - c.Assert(fr.IsReady(), Equals, true) - c.Assert(fr.Ratio(), Equals, 1.0) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/roundtrip.go b/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/roundtrip.go deleted file mode 100644 index 7df13855b..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/roundtrip.go +++ /dev/null @@ -1,228 +0,0 @@ -package memmetrics - -import ( - "net/http" - "time" - - "github.com/mailgun/timetools" -) - -// RTMetrics provides aggregated performance metrics for HTTP requests processing -// such as round trip latency, response codes counters network error and total requests. -// all counters are collected as rolling window counters with defined precision, histograms -// are a rolling window histograms with defined precision as well. -// See RTOptions for more detail on parameters. -type RTMetrics struct { - total *RollingCounter - netErrors *RollingCounter - statusCodes map[int]*RollingCounter - histogram *RollingHDRHistogram - - newCounter NewCounterFn - newHist NewRollingHistogramFn - clock timetools.TimeProvider -} - -type rrOptSetter func(r *RTMetrics) error - -type NewRTMetricsFn func() (*RTMetrics, error) -type NewCounterFn func() (*RollingCounter, error) -type NewRollingHistogramFn func() (*RollingHDRHistogram, error) - -func RTCounter(new NewCounterFn) rrOptSetter { - return func(r *RTMetrics) error { - r.newCounter = new - return nil - } -} - -func RTHistogram(new NewRollingHistogramFn) rrOptSetter { - return func(r *RTMetrics) error { - r.newHist = new - return nil - } -} - -func RTClock(clock timetools.TimeProvider) rrOptSetter { - return func(r *RTMetrics) error { - r.clock = clock - return nil - } -} - -// NewRTMetrics returns new instance of metrics collector. 
-func NewRTMetrics(settings ...rrOptSetter) (*RTMetrics, error) { - m := &RTMetrics{ - statusCodes: make(map[int]*RollingCounter), - } - for _, s := range settings { - if err := s(m); err != nil { - return nil, err - } - } - - if m.clock == nil { - m.clock = &timetools.RealTime{} - } - - if m.newCounter == nil { - m.newCounter = func() (*RollingCounter, error) { - return NewCounter(counterBuckets, counterResolution, CounterClock(m.clock)) - } - } - - if m.newHist == nil { - m.newHist = func() (*RollingHDRHistogram, error) { - return NewRollingHDRHistogram(histMin, histMax, histSignificantFigures, histPeriod, histBuckets, RollingClock(m.clock)) - } - } - - h, err := m.newHist() - if err != nil { - return nil, err - } - - netErrors, err := m.newCounter() - if err != nil { - return nil, err - } - - total, err := m.newCounter() - if err != nil { - return nil, err - } - - m.histogram = h - m.netErrors = netErrors - m.total = total - return m, nil -} - -func (m *RTMetrics) CounterWindowSize() time.Duration { - return m.total.WindowSize() -} - -// GetNetworkErrorRatio calculates the amont of network errors such as time outs and dropped connection -// that occured in the given time window compared to the total requests count. -func (m *RTMetrics) NetworkErrorRatio() float64 { - if m.total.Count() == 0 { - return 0 - } - return float64(m.netErrors.Count()) / float64(m.total.Count()) -} - -// GetResponseCodeRatio calculates ratio of count(startA to endA) / count(startB to endB) -func (m *RTMetrics) ResponseCodeRatio(startA, endA, startB, endB int) float64 { - a := int64(0) - b := int64(0) - for code, v := range m.statusCodes { - if code < endA && code >= startA { - a += v.Count() - } - if code < endB && code >= startB { - b += v.Count() - } - } - if b != 0 { - return float64(a) / float64(b) - } - return 0 -} - -func (m *RTMetrics) Append(other *RTMetrics) error { - if err := m.total.Append(other.total); err != nil { - return err - } - - if err := m.netErrors.Append(other.netErrors); err != nil { - return err - } - - for code, c := range other.statusCodes { - o, ok := m.statusCodes[code] - if ok { - if err := o.Append(c); err != nil { - return err - } - } else { - m.statusCodes[code] = c.Clone() - } - } - - return m.histogram.Append(other.histogram) -} - -func (m *RTMetrics) Record(code int, duration time.Duration) { - m.total.Inc(1) - if code == http.StatusGatewayTimeout || code == http.StatusBadGateway { - m.netErrors.Inc(1) - } - m.recordStatusCode(code) - m.recordLatency(duration) -} - -// GetTotalCount returns total count of processed requests collected. -func (m *RTMetrics) TotalCount() int64 { - return m.total.Count() -} - -// GetNetworkErrorCount returns total count of processed requests observed -func (m *RTMetrics) NetworkErrorCount() int64 { - return m.netErrors.Count() -} - -// GetStatusCodesCounts returns map with counts of the response codes -func (m *RTMetrics) StatusCodesCounts() map[int]int64 { - sc := make(map[int]int64) - for k, v := range m.statusCodes { - if v.Count() != 0 { - sc[k] = v.Count() - } - } - return sc -} - -// GetLatencyHistogram computes and returns resulting histogram with latencies observed. 
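// A hedged usage example of RTMetrics above, reusing the numbers from
// TestDefaults below: three 200s and one 502 give a network error ratio of 1/4
// and a 5xx-to-2xx response code ratio of 1/3.
package main

import (
	"fmt"
	"time"

	"github.com/mailgun/oxy/memmetrics"
)

func main() {
	m, err := memmetrics.NewRTMetrics()
	if err != nil {
		panic(err)
	}

	m.Record(200, time.Second)
	m.Record(502, 2*time.Second) // 502 and 504 also count as network errors
	m.Record(200, time.Second)
	m.Record(200, time.Second)

	fmt.Println(m.TotalCount())                          // 4
	fmt.Println(m.NetworkErrorRatio())                   // 0.25
	fmt.Println(m.ResponseCodeRatio(500, 503, 200, 300)) // 0.3333...
	fmt.Println(m.StatusCodesCounts())                   // map[200:3 502:1]
}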
-func (m *RTMetrics) LatencyHistogram() (*HDRHistogram, error) { - return m.histogram.Merged() -} - -func (m *RTMetrics) Reset() { - m.histogram.Reset() - m.total.Reset() - m.netErrors.Reset() - m.statusCodes = make(map[int]*RollingCounter) -} - -func (m *RTMetrics) recordNetError() error { - m.netErrors.Inc(1) - return nil -} - -func (m *RTMetrics) recordLatency(d time.Duration) error { - return m.histogram.RecordLatencies(d, 1) -} - -func (m *RTMetrics) recordStatusCode(statusCode int) error { - if c, ok := m.statusCodes[statusCode]; ok { - c.Inc(1) - return nil - } - c, err := m.newCounter() - if err != nil { - return err - } - c.Inc(1) - m.statusCodes[statusCode] = c - return nil -} - -const ( - counterBuckets = 10 - counterResolution = time.Second - histMin = 1 - histMax = 3600000000 // 1 hour in microseconds - histSignificantFigures = 2 // signigicant figures (1% precision) - histBuckets = 6 // number of sub-histograms in a rolling histogram - histPeriod = 10 * time.Second // roll time -) diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/roundtrip_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/roundtrip_test.go deleted file mode 100644 index 1bf750931..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/memmetrics/roundtrip_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package memmetrics - -import ( - "time" - - "github.com/mailgun/timetools" - . "gopkg.in/check.v1" -) - -type RRSuite struct { - tm *timetools.FreezedTime -} - -var _ = Suite(&RRSuite{}) - -func (s *RRSuite) SetUpSuite(c *C) { - s.tm = &timetools.FreezedTime{ - CurrentTime: time.Date(2012, 3, 4, 5, 6, 7, 0, time.UTC), - } -} - -func (s *RRSuite) TestDefaults(c *C) { - rr, err := NewRTMetrics(RTClock(s.tm)) - c.Assert(err, IsNil) - c.Assert(rr, NotNil) - - rr.Record(200, time.Second) - rr.Record(502, 2*time.Second) - rr.Record(200, time.Second) - rr.Record(200, time.Second) - - c.Assert(rr.NetworkErrorCount(), Equals, int64(1)) - c.Assert(rr.TotalCount(), Equals, int64(4)) - c.Assert(rr.StatusCodesCounts(), DeepEquals, map[int]int64{502: 1, 200: 3}) - c.Assert(rr.NetworkErrorRatio(), Equals, float64(1)/float64(4)) - c.Assert(rr.ResponseCodeRatio(500, 503, 200, 300), Equals, 1.0/3.0) - - h, err := rr.LatencyHistogram() - c.Assert(err, IsNil) - c.Assert(int(h.LatencyAtQuantile(100)/time.Second), Equals, 2) - - rr.Reset() - c.Assert(rr.NetworkErrorCount(), Equals, int64(0)) - c.Assert(rr.TotalCount(), Equals, int64(0)) - c.Assert(rr.StatusCodesCounts(), DeepEquals, map[int]int64{}) - c.Assert(rr.NetworkErrorRatio(), Equals, float64(0)) - c.Assert(rr.ResponseCodeRatio(500, 503, 200, 300), Equals, float64(0)) - - h, err = rr.LatencyHistogram() - c.Assert(err, IsNil) - c.Assert(h.LatencyAtQuantile(100), Equals, time.Duration(0)) - -} - -func (s *RRSuite) TestAppend(c *C) { - rr, err := NewRTMetrics(RTClock(s.tm)) - c.Assert(err, IsNil) - c.Assert(rr, NotNil) - - rr.Record(200, time.Second) - rr.Record(502, 2*time.Second) - rr.Record(200, time.Second) - rr.Record(200, time.Second) - - rr2, err := NewRTMetrics(RTClock(s.tm)) - c.Assert(err, IsNil) - c.Assert(rr2, NotNil) - - rr2.Record(200, 3*time.Second) - rr2.Record(501, 3*time.Second) - rr2.Record(200, 3*time.Second) - rr2.Record(200, 3*time.Second) - - c.Assert(rr2.Append(rr), IsNil) - c.Assert(rr2.StatusCodesCounts(), DeepEquals, map[int]int64{501: 1, 502: 1, 200: 6}) - c.Assert(rr2.NetworkErrorCount(), Equals, int64(1)) - - h, err := rr2.LatencyHistogram() - c.Assert(err, IsNil) - c.Assert(int(h.LatencyAtQuantile(100)/time.Second), 
Equals, 3) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rebalancer.go b/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rebalancer.go deleted file mode 100644 index 07637d271..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rebalancer.go +++ /dev/null @@ -1,431 +0,0 @@ -package roundrobin - -import ( - "fmt" - "net/http" - "net/url" - "sync" - "time" - - "github.com/mailgun/oxy/memmetrics" - "github.com/mailgun/oxy/utils" - "github.com/mailgun/timetools" -) - -// RebalancerOption - functional option setter for rebalancer -type RebalancerOption func(*Rebalancer) error - -// Meter measures server peformance and returns it's relative value via rating -type Meter interface { - Rating() float64 - Record(int, time.Duration) - IsReady() bool -} - -type NewMeterFn func() (Meter, error) - -// Rebalancer increases weights on servers that perform better than others. It also rolls back to original weights -// if the servers have changed. It is designed as a wrapper on top of the roundrobin. -type Rebalancer struct { - // mutex - mtx *sync.Mutex - // As usual, control time in tests - clock timetools.TimeProvider - // Time that freezes state machine to accumulate stats after updating the weights - backoffDuration time.Duration - // Timer is set to give probing some time to take place - timer time.Time - // server records that remember original weights - servers []*rbServer - // next is internal load balancer next in chain - next balancerHandler - // errHandler is HTTP handler called in case of errors - errHandler utils.ErrorHandler - - log utils.Logger - - ratings []float64 - - // creates new meters - newMeter NewMeterFn -} - -func RebalancerLogger(log utils.Logger) RebalancerOption { - return func(r *Rebalancer) error { - r.log = log - return nil - } -} - -func RebalancerClock(clock timetools.TimeProvider) RebalancerOption { - return func(r *Rebalancer) error { - r.clock = clock - return nil - } -} - -func RebalancerBackoff(d time.Duration) RebalancerOption { - return func(r *Rebalancer) error { - r.backoffDuration = d - return nil - } -} - -func RebalancerMeter(newMeter NewMeterFn) RebalancerOption { - return func(r *Rebalancer) error { - r.newMeter = newMeter - return nil - } -} - -// RebalancerErrorHandler is a functional argument that sets error handler of the server -func RebalancerErrorHandler(h utils.ErrorHandler) RebalancerOption { - return func(r *Rebalancer) error { - r.errHandler = h - return nil - } -} - -func NewRebalancer(handler balancerHandler, opts ...RebalancerOption) (*Rebalancer, error) { - rb := &Rebalancer{ - mtx: &sync.Mutex{}, - next: handler, - } - for _, o := range opts { - if err := o(rb); err != nil { - return nil, err - } - } - if rb.clock == nil { - rb.clock = &timetools.RealTime{} - } - if rb.backoffDuration == 0 { - rb.backoffDuration = 10 * time.Second - } - if rb.log == nil { - rb.log = &utils.NOPLogger{} - } - if rb.newMeter == nil { - rb.newMeter = func() (Meter, error) { - rc, err := memmetrics.NewRatioCounter(10, time.Second, memmetrics.RatioClock(rb.clock)) - if err != nil { - return nil, err - } - return &codeMeter{ - r: rc, - codeS: http.StatusInternalServerError, - codeE: http.StatusGatewayTimeout + 1, - }, nil - } - } - if rb.errHandler == nil { - rb.errHandler = utils.DefaultHandler - } - return rb, nil -} - -func (rb *Rebalancer) Servers() []*url.URL { - rb.mtx.Lock() - defer rb.mtx.Unlock() - - return rb.next.Servers() -} - -func (rb *Rebalancer) ServeHTTP(w http.ResponseWriter, req *http.Request) { 
- pw := &utils.ProxyWriter{W: w} - start := rb.clock.UtcNow() - url, err := rb.next.NextServer() - if err != nil { - rb.errHandler.ServeHTTP(w, req, err) - return - } - - // make shallow copy of request before changing anything to avoid side effects - newReq := *req - newReq.URL = url - rb.next.Next().ServeHTTP(pw, &newReq) - - rb.recordMetrics(url, pw.Code, rb.clock.UtcNow().Sub(start)) - rb.adjustWeights() -} - -func (rb *Rebalancer) recordMetrics(u *url.URL, code int, latency time.Duration) { - rb.mtx.Lock() - defer rb.mtx.Unlock() - if srv, i := rb.findServer(u); i != -1 { - srv.meter.Record(code, latency) - } -} - -func (rb *Rebalancer) reset() { - for _, s := range rb.servers { - s.curWeight = s.origWeight - rb.next.UpsertServer(s.url, Weight(s.origWeight)) - } - rb.timer = rb.clock.UtcNow().Add(-1 * time.Second) - rb.ratings = make([]float64, len(rb.servers)) -} - -func (rb *Rebalancer) Wrap(next balancerHandler) error { - if rb.next != nil { - return fmt.Errorf("already bound to %T", rb.next) - } - rb.next = next - return nil -} - -func (rb *Rebalancer) UpsertServer(u *url.URL, options ...ServerOption) error { - rb.mtx.Lock() - defer rb.mtx.Unlock() - - if err := rb.next.UpsertServer(u, options...); err != nil { - return err - } - weight, _ := rb.next.ServerWeight(u) - if err := rb.upsertServer(u, weight); err != nil { - rb.next.RemoveServer(u) - return err - } - rb.reset() - return nil -} - -func (rb *Rebalancer) RemoveServer(u *url.URL) error { - rb.mtx.Lock() - defer rb.mtx.Unlock() - - return rb.removeServer(u) -} - -func (rb *Rebalancer) removeServer(u *url.URL) error { - _, i := rb.findServer(u) - if i == -1 { - return fmt.Errorf("%v not found", u) - } - if err := rb.next.RemoveServer(u); err != nil { - return err - } - rb.servers = append(rb.servers[:i], rb.servers[i+1:]...) - rb.reset() - return nil -} - -func (rb *Rebalancer) upsertServer(u *url.URL, weight int) error { - if s, i := rb.findServer(u); i != -1 { - s.origWeight = weight - } - meter, err := rb.newMeter() - if err != nil { - return err - } - rbSrv := &rbServer{ - url: utils.CopyURL(u), - origWeight: weight, - curWeight: weight, - meter: meter, - } - rb.servers = append(rb.servers, rbSrv) - return nil -} - -func (r *Rebalancer) findServer(u *url.URL) (*rbServer, int) { - if len(r.servers) == 0 { - return nil, -1 - } - for i, s := range r.servers { - if sameURL(u, s.url) { - return s, i - } - } - return nil, -1 -} - -// Called on every load balancer ServeHTTP call, returns the suggested weights -// on every call, can adjust weights if needed. 
-func (rb *Rebalancer) adjustWeights() { - rb.mtx.Lock() - defer rb.mtx.Unlock() - - // In this case adjusting weights would have no effect, so do nothing - if len(rb.servers) < 2 { - return - } - // Metrics are not ready - if !rb.metricsReady() { - return - } - if !rb.timerExpired() { - return - } - if rb.markServers() { - if rb.setMarkedWeights() { - rb.setTimer() - } - } else { // No servers that are different by their quality, so converge weights - if rb.convergeWeights() { - rb.setTimer() - } - } -} - -func (rb *Rebalancer) applyWeights() { - for _, srv := range rb.servers { - rb.log.Infof("upsert server %v, weight %v", srv.url, srv.curWeight) - rb.next.UpsertServer(srv.url, Weight(srv.curWeight)) - } -} - -func (rb *Rebalancer) setMarkedWeights() bool { - changed := false - // Increase weights on servers marked as good - for _, srv := range rb.servers { - if srv.good { - weight := increase(srv.curWeight) - if weight <= FSMMaxWeight { - rb.log.Infof("increasing weight of %v from %v to %v", srv.url, srv.curWeight, weight) - srv.curWeight = weight - changed = true - } - } - } - if changed { - rb.normalizeWeights() - rb.applyWeights() - return true - } - return false -} - -func (rb *Rebalancer) setTimer() { - rb.timer = rb.clock.UtcNow().Add(rb.backoffDuration) -} - -func (rb *Rebalancer) timerExpired() bool { - return rb.timer.Before(rb.clock.UtcNow()) -} - -func (rb *Rebalancer) metricsReady() bool { - for _, s := range rb.servers { - if !s.meter.IsReady() { - return false - } - } - return true -} - -// markServers splits servers into two groups of servers with bad and good failure rate. -// It does compare relative performances of the servers though, so if all servers have approximately the same error rate -// this function returns the result as if all servers are equally good. 
-func (rb *Rebalancer) markServers() bool { - for i, srv := range rb.servers { - rb.ratings[i] = srv.meter.Rating() - } - g, b := memmetrics.SplitFloat64(splitThreshold, 0, rb.ratings) - for i, srv := range rb.servers { - if g[rb.ratings[i]] { - srv.good = true - } else { - srv.good = false - } - } - if len(g) != 0 && len(b) != 0 { - rb.log.Infof("bad: %v good: %v, ratings: %v", b, g, rb.ratings) - } - return len(g) != 0 && len(b) != 0 -} - -func (rb *Rebalancer) convergeWeights() bool { - // If we have previoulsy changed servers try to restore weights to the original state - changed := false - for _, s := range rb.servers { - if s.origWeight == s.curWeight { - continue - } - changed = true - newWeight := decrease(s.origWeight, s.curWeight) - rb.log.Infof("decreasing weight of %v from %v to %v", s.url, s.curWeight, newWeight) - s.curWeight = newWeight - } - if !changed { - return false - } - rb.normalizeWeights() - rb.applyWeights() - return true -} - -func (rb *Rebalancer) weightsGcd() int { - divisor := -1 - for _, w := range rb.servers { - if divisor == -1 { - divisor = w.curWeight - } else { - divisor = gcd(divisor, w.curWeight) - } - } - return divisor -} - -func (rb *Rebalancer) normalizeWeights() { - gcd := rb.weightsGcd() - if gcd <= 1 { - return - } - for _, s := range rb.servers { - s.curWeight = s.curWeight / gcd - } -} - -func increase(weight int) int { - return weight * FSMGrowFactor -} - -func decrease(target, current int) int { - adjusted := current / FSMGrowFactor - if adjusted < target { - return target - } else { - return adjusted - } -} - -// rebalancer server record that keeps track of the original weight supplied by user -type rbServer struct { - url *url.URL - origWeight int // original weight supplied by user - curWeight int // current weight - good bool - meter Meter -} - -const ( - // This is the maximum weight that handler will set for the server - FSMMaxWeight = 4096 - // Multiplier for the server weight - FSMGrowFactor = 4 -) - -type codeMeter struct { - r *memmetrics.RatioCounter - codeS int - codeE int -} - -func (n *codeMeter) Rating() float64 { - return n.r.Ratio() -} - -func (n *codeMeter) Record(code int, d time.Duration) { - if code >= n.codeS && code < n.codeE { - n.r.IncA(1) - } else { - n.r.IncB(1) - } -} - -func (n *codeMeter) IsReady() bool { - return n.r.IsReady() -} - -// splitThreshold tells how far the value should go from the median + median absolute deviation before it is considered an outlier -const splitThreshold = 1.5 diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rebalancer_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rebalancer_test.go deleted file mode 100644 index 0c2167b5f..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rebalancer_test.go +++ /dev/null @@ -1,344 +0,0 @@ -package roundrobin - -import ( - "net/http" - "net/http/httptest" - "os" - "time" - - "github.com/mailgun/oxy/forward" - "github.com/mailgun/oxy/testutils" - "github.com/mailgun/oxy/utils" - "github.com/mailgun/timetools" - - . 
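// A stand-alone arithmetic sketch (not the vendored code) of the weight state
// machine above: weights of good servers are multiplied by the grow factor 4 up
// to the cap of 4096, they are divided by 4 back toward the original weight once
// all servers look equally healthy again, and the resulting set is reduced by
// its greatest common divisor so weights stay small.
package main

import "fmt"

const (
	maxWeight  = 4096 // FSMMaxWeight
	growFactor = 4    // FSMGrowFactor
)

func increase(w int) int {
	if w*growFactor > maxWeight {
		return w // setMarkedWeights stops growing past the cap
	}
	return w * growFactor
}

func decrease(original, current int) int {
	adjusted := current / growFactor
	if adjusted < original {
		return original
	}
	return adjusted
}

func gcd(a, b int) int {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	w := 1
	for i := 0; i < 7; i++ { // growth reaches the 4096 cap after six steps
		w = increase(w)
	}
	fmt.Println(w) // 4096

	for w > 1 {
		w = decrease(1, w) // converge back toward the original weight of 1
	}
	fmt.Println(w) // 1

	// Normalization: weights {4, 8} reduce to {1, 2} by dividing by gcd = 4.
	a, b := 4, 8
	if d := gcd(a, b); d > 1 {
		a, b = a/d, b/d
	}
	fmt.Println(a, b) // 1 2
}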
"gopkg.in/check.v1" -) - -type RBSuite struct { - clock *timetools.FreezedTime - log utils.Logger -} - -var _ = Suite(&RBSuite{ - clock: &timetools.FreezedTime{ - CurrentTime: time.Date(2012, 3, 4, 5, 6, 7, 0, time.UTC), - }, -}) - -func (s *RBSuite) SetUpSuite(c *C) { - s.log = utils.NewFileLogger(os.Stdout, utils.INFO) -} - -func (s *RBSuite) TestRebalancerNormalOperation(c *C) { - a, b := testutils.NewResponder("a"), testutils.NewResponder("b") - defer a.Close() - defer b.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - rb, err := NewRebalancer(lb) - c.Assert(err, IsNil) - - rb.UpsertServer(testutils.ParseURI(a.URL)) - c.Assert(rb.Servers()[0].String(), Equals, a.URL) - - proxy := httptest.NewServer(rb) - defer proxy.Close() - - c.Assert(seq(c, proxy.URL, 3), DeepEquals, []string{"a", "a", "a"}) -} - -func (s *RBSuite) TestRebalancerNoServers(c *C) { - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - rb, err := NewRebalancer(lb) - c.Assert(err, IsNil) - - proxy := httptest.NewServer(rb) - defer proxy.Close() - - re, _, err := testutils.Get(proxy.URL) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusInternalServerError) -} - -func (s *RBSuite) TestRebalancerRemoveServer(c *C) { - a, b := testutils.NewResponder("a"), testutils.NewResponder("b") - defer a.Close() - defer b.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - rb, err := NewRebalancer(lb) - c.Assert(err, IsNil) - - rb.UpsertServer(testutils.ParseURI(a.URL)) - rb.UpsertServer(testutils.ParseURI(b.URL)) - - proxy := httptest.NewServer(rb) - defer proxy.Close() - - c.Assert(seq(c, proxy.URL, 3), DeepEquals, []string{"a", "b", "a"}) - - c.Assert(rb.RemoveServer(testutils.ParseURI(a.URL)), IsNil) - c.Assert(seq(c, proxy.URL, 3), DeepEquals, []string{"b", "b", "b"}) -} - -// Test scenario when one server goes down after what it recovers -func (s *RBSuite) TestRebalancerRecovery(c *C) { - a, b := testutils.NewResponder("a"), testutils.NewResponder("b") - defer a.Close() - defer b.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - newMeter := func() (Meter, error) { - return &testMeter{}, nil - } - rb, err := NewRebalancer(lb, RebalancerMeter(newMeter), RebalancerClock(s.clock), RebalancerLogger(s.log)) - c.Assert(err, IsNil) - - rb.UpsertServer(testutils.ParseURI(a.URL)) - rb.UpsertServer(testutils.ParseURI(b.URL)) - rb.servers[0].meter.(*testMeter).rating = 0.3 - - proxy := httptest.NewServer(rb) - defer proxy.Close() - - for i := 0; i < 6; i += 1 { - testutils.Get(proxy.URL) - testutils.Get(proxy.URL) - s.clock.CurrentTime = s.clock.CurrentTime.Add(rb.backoffDuration + time.Second) - } - - c.Assert(rb.servers[0].curWeight, Equals, 1) - c.Assert(rb.servers[1].curWeight, Equals, FSMMaxWeight) - - c.Assert(lb.servers[0].weight, Equals, 1) - c.Assert(lb.servers[1].weight, Equals, FSMMaxWeight) - - // server a is now recovering, the weights should go back to the original state - rb.servers[0].meter.(*testMeter).rating = 0 - - for i := 0; i < 6; i += 1 { - testutils.Get(proxy.URL) - testutils.Get(proxy.URL) - s.clock.CurrentTime = s.clock.CurrentTime.Add(rb.backoffDuration + time.Second) - } - - c.Assert(rb.servers[0].curWeight, Equals, 1) - c.Assert(rb.servers[1].curWeight, Equals, 1) - - // Make sure we have applied the weights to the inner load balancer - 
c.Assert(lb.servers[0].weight, Equals, 1) - c.Assert(lb.servers[1].weight, Equals, 1) -} - -// Test scenario when increaing the weight on good endpoints made it worse -func (s *RBSuite) TestRebalancerCascading(c *C) { - a, b, d := testutils.NewResponder("a"), testutils.NewResponder("b"), testutils.NewResponder("d") - defer a.Close() - defer b.Close() - defer d.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - newMeter := func() (Meter, error) { - return &testMeter{}, nil - } - rb, err := NewRebalancer(lb, RebalancerMeter(newMeter), RebalancerClock(s.clock), RebalancerLogger(s.log)) - c.Assert(err, IsNil) - - rb.UpsertServer(testutils.ParseURI(a.URL)) - rb.UpsertServer(testutils.ParseURI(b.URL)) - rb.UpsertServer(testutils.ParseURI(d.URL)) - rb.servers[0].meter.(*testMeter).rating = 0.3 - - proxy := httptest.NewServer(rb) - defer proxy.Close() - - for i := 0; i < 6; i += 1 { - testutils.Get(proxy.URL) - testutils.Get(proxy.URL) - s.clock.CurrentTime = s.clock.CurrentTime.Add(rb.backoffDuration + time.Second) - } - - // We have increased the load, and the situation became worse as the other servers started failing - c.Assert(rb.servers[0].curWeight, Equals, 1) - c.Assert(rb.servers[1].curWeight, Equals, FSMMaxWeight) - c.Assert(rb.servers[2].curWeight, Equals, FSMMaxWeight) - - // server a is now recovering, the weights should go back to the original state - rb.servers[0].meter.(*testMeter).rating = 0.3 - rb.servers[1].meter.(*testMeter).rating = 0.2 - rb.servers[2].meter.(*testMeter).rating = 0.2 - - for i := 0; i < 6; i += 1 { - testutils.Get(proxy.URL) - testutils.Get(proxy.URL) - s.clock.CurrentTime = s.clock.CurrentTime.Add(rb.backoffDuration + time.Second) - } - - // the algo reverted it back - c.Assert(rb.servers[0].curWeight, Equals, 1) - c.Assert(rb.servers[1].curWeight, Equals, 1) - c.Assert(rb.servers[2].curWeight, Equals, 1) -} - -// Test scenario when all servers started failing -func (s *RBSuite) TestRebalancerAllBad(c *C) { - a, b, d := testutils.NewResponder("a"), testutils.NewResponder("b"), testutils.NewResponder("d") - defer a.Close() - defer b.Close() - defer d.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - newMeter := func() (Meter, error) { - return &testMeter{}, nil - } - rb, err := NewRebalancer(lb, RebalancerMeter(newMeter), RebalancerClock(s.clock), RebalancerLogger(s.log)) - c.Assert(err, IsNil) - - rb.UpsertServer(testutils.ParseURI(a.URL)) - rb.UpsertServer(testutils.ParseURI(b.URL)) - rb.UpsertServer(testutils.ParseURI(d.URL)) - rb.servers[0].meter.(*testMeter).rating = 0.12 - rb.servers[1].meter.(*testMeter).rating = 0.13 - rb.servers[2].meter.(*testMeter).rating = 0.11 - - proxy := httptest.NewServer(rb) - defer proxy.Close() - - for i := 0; i < 6; i += 1 { - testutils.Get(proxy.URL) - testutils.Get(proxy.URL) - s.clock.CurrentTime = s.clock.CurrentTime.Add(rb.backoffDuration + time.Second) - } - - // load balancer does nothing - c.Assert(rb.servers[0].curWeight, Equals, 1) - c.Assert(rb.servers[1].curWeight, Equals, 1) - c.Assert(rb.servers[2].curWeight, Equals, 1) -} - -// Removing the server resets the state -func (s *RBSuite) TestRebalancerReset(c *C) { - a, b, d := testutils.NewResponder("a"), testutils.NewResponder("b"), testutils.NewResponder("d") - defer a.Close() - defer b.Close() - defer d.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - newMeter := func() (Meter, 
error) { - return &testMeter{}, nil - } - rb, err := NewRebalancer(lb, RebalancerMeter(newMeter), RebalancerClock(s.clock), RebalancerLogger(s.log)) - c.Assert(err, IsNil) - - rb.UpsertServer(testutils.ParseURI(a.URL)) - rb.UpsertServer(testutils.ParseURI(b.URL)) - rb.UpsertServer(testutils.ParseURI(d.URL)) - rb.servers[0].meter.(*testMeter).rating = 0.3 - rb.servers[1].meter.(*testMeter).rating = 0 - rb.servers[2].meter.(*testMeter).rating = 0 - - proxy := httptest.NewServer(rb) - defer proxy.Close() - - for i := 0; i < 6; i += 1 { - testutils.Get(proxy.URL) - testutils.Get(proxy.URL) - s.clock.CurrentTime = s.clock.CurrentTime.Add(rb.backoffDuration + time.Second) - } - - // load balancer changed weights - c.Assert(rb.servers[0].curWeight, Equals, 1) - c.Assert(rb.servers[1].curWeight, Equals, FSMMaxWeight) - c.Assert(rb.servers[2].curWeight, Equals, FSMMaxWeight) - - // Removing servers has reset the state - rb.RemoveServer(testutils.ParseURI(d.URL)) - - c.Assert(rb.servers[0].curWeight, Equals, 1) - c.Assert(rb.servers[1].curWeight, Equals, 1) -} - -func (s *RBSuite) TestRebalancerLive(c *C) { - a, b := testutils.NewResponder("a"), testutils.NewResponder("b") - defer a.Close() - defer b.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - rb, err := NewRebalancer(lb, RebalancerBackoff(time.Millisecond), RebalancerClock(s.clock)) - c.Assert(err, IsNil) - - rb.UpsertServer(testutils.ParseURI(a.URL)) - rb.UpsertServer(testutils.ParseURI(b.URL)) - rb.UpsertServer(testutils.ParseURI("http://localhost:62345")) - - proxy := httptest.NewServer(rb) - defer proxy.Close() - - for i := 0; i < 1000; i += 1 { - testutils.Get(proxy.URL) - if i%10 == 0 { - s.clock.CurrentTime = s.clock.CurrentTime.Add(rb.backoffDuration + time.Second) - } - } - - // load balancer changed weights - c.Assert(rb.servers[0].curWeight, Equals, FSMMaxWeight) - c.Assert(rb.servers[1].curWeight, Equals, FSMMaxWeight) - c.Assert(rb.servers[2].curWeight, Equals, 1) -} - -type testMeter struct { - rating float64 - notReady bool -} - -func (tm *testMeter) Rating() float64 { - return tm.rating -} - -func (tm *testMeter) Record(int, time.Duration) { -} - -func (tm *testMeter) IsReady() bool { - return !tm.notReady -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rr.go b/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rr.go deleted file mode 100644 index bbd4a6182..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rr.go +++ /dev/null @@ -1,267 +0,0 @@ -// package roundrobin implements dynamic weighted round robin load balancer http handler -package roundrobin - -import ( - "fmt" - "net/http" - "net/url" - "sync" - - "github.com/mailgun/oxy/utils" -) - -// Weight is an optional functional argument that sets weight of the server -func Weight(w int) ServerOption { - return func(s *server) error { - if w < 0 { - return fmt.Errorf("Weight should be >= 0") - } - s.weight = w - return nil - } -} - -// ErrorHandler is a functional argument that sets error handler of the server -func ErrorHandler(h utils.ErrorHandler) LBOption { - return func(s *RoundRobin) error { - s.errHandler = h - return nil - } -} - -type RoundRobin struct { - mutex *sync.Mutex - next http.Handler - errHandler utils.ErrorHandler - // Current index (starts from -1) - index int - servers []*server - currentWeight int -} - -func New(next http.Handler, opts ...LBOption) (*RoundRobin, error) { - rr := &RoundRobin{ - next: next, - index: -1, - mutex: &sync.Mutex{}, 
- servers: []*server{}, - } - for _, o := range opts { - if err := o(rr); err != nil { - return nil, err - } - } - if rr.errHandler == nil { - rr.errHandler = utils.DefaultHandler - } - return rr, nil -} - -func (r *RoundRobin) Next() http.Handler { - return r.next -} - -func (r *RoundRobin) ServeHTTP(w http.ResponseWriter, req *http.Request) { - url, err := r.NextServer() - if err != nil { - r.errHandler.ServeHTTP(w, req, err) - return - } - // make shallow copy of request before chaning anything to avoid side effects - newReq := *req - newReq.URL = url - r.next.ServeHTTP(w, &newReq) -} - -func (r *RoundRobin) NextServer() (*url.URL, error) { - srv, err := r.nextServer() - if err != nil { - return nil, err - } - return utils.CopyURL(srv.url), nil -} - -func (r *RoundRobin) nextServer() (*server, error) { - r.mutex.Lock() - defer r.mutex.Unlock() - - if len(r.servers) == 0 { - return nil, fmt.Errorf("no servers in the pool") - } - - // The algo below may look messy, but is actually very simple - // it calculates the GCD and subtracts it on every iteration, what interleaves servers - // and allows us not to build an iterator every time we readjust weights - - // GCD across all enabled servers - gcd := r.weightGcd() - // Maximum weight across all enabled servers - max := r.maxWeight() - - for { - r.index = (r.index + 1) % len(r.servers) - if r.index == 0 { - r.currentWeight = r.currentWeight - gcd - if r.currentWeight <= 0 { - r.currentWeight = max - if r.currentWeight == 0 { - return nil, fmt.Errorf("all servers have 0 weight") - } - } - } - srv := r.servers[r.index] - if srv.weight >= r.currentWeight { - return srv, nil - } - } - // We did full circle and found no available servers - return nil, fmt.Errorf("no available servers") -} - -func (r *RoundRobin) RemoveServer(u *url.URL) error { - r.mutex.Lock() - defer r.mutex.Unlock() - - e, index := r.findServerByURL(u) - if e == nil { - return fmt.Errorf("server not found") - } - r.servers = append(r.servers[:index], r.servers[index+1:]...) 
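The selection loop in nextServer above interleaves servers by lowering a weight threshold by the GCD of all weights each time the index wraps around. A standalone trace of that loop with example weights of 3 and 2 reproduces the a, a, b, a, b, a order that the weighted round-robin tests further down expect; this is a sketch of the selection logic only, not the oxy handler:

```go
// Sketch only: a standalone trace of the interleaving performed by nextServer.
package main

import "fmt"

// gcd matches the helper in rr.go above.
func gcd(a, b int) int {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

func main() {
	names := []string{"a", "b"}
	weights := []int{3, 2}

	// GCD and maximum across all weights, as nextServer computes them.
	step, max := weights[0], weights[0]
	for _, w := range weights[1:] {
		step = gcd(step, w)
		if w > max {
			max = w
		}
	}

	idx, current := -1, 0
	var out []string
	for len(out) < 6 {
		idx = (idx + 1) % len(weights)
		if idx == 0 {
			// Every full pass lowers the threshold by the GCD;
			// when it reaches zero it resets to the maximum weight.
			current -= step
			if current <= 0 {
				current = max
			}
		}
		if weights[idx] >= current {
			out = append(out, names[idx])
		}
	}
	fmt.Println(out) // [a a b a b a]
}
```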
- r.resetState() - return nil -} - -func (rr *RoundRobin) Servers() []*url.URL { - rr.mutex.Lock() - defer rr.mutex.Unlock() - - out := make([]*url.URL, len(rr.servers)) - for i, srv := range rr.servers { - out[i] = srv.url - } - return out -} - -func (rr *RoundRobin) ServerWeight(u *url.URL) (int, bool) { - rr.mutex.Lock() - defer rr.mutex.Unlock() - - if s, _ := rr.findServerByURL(u); s != nil { - return s.weight, true - } - return -1, false -} - -// In case if server is already present in the load balancer, returns error -func (rr *RoundRobin) UpsertServer(u *url.URL, options ...ServerOption) error { - rr.mutex.Lock() - defer rr.mutex.Unlock() - - if u == nil { - return fmt.Errorf("server URL can't be nil") - } - - if s, _ := rr.findServerByURL(u); s != nil { - for _, o := range options { - if err := o(s); err != nil { - return err - } - } - rr.resetState() - return nil - } - - srv := &server{url: utils.CopyURL(u)} - for _, o := range options { - if err := o(srv); err != nil { - return err - } - } - - if srv.weight == 0 { - srv.weight = defaultWeight - } - - rr.servers = append(rr.servers, srv) - rr.resetState() - return nil -} - -func (r *RoundRobin) resetIterator() { - r.index = -1 - r.currentWeight = 0 -} - -func (r *RoundRobin) resetState() { - r.resetIterator() -} - -func (r *RoundRobin) findServerByURL(u *url.URL) (*server, int) { - if len(r.servers) == 0 { - return nil, -1 - } - for i, s := range r.servers { - if sameURL(u, s.url) { - return s, i - } - } - return nil, -1 -} - -func (rr *RoundRobin) maxWeight() int { - max := -1 - for _, s := range rr.servers { - if s.weight > max { - max = s.weight - } - } - return max -} - -func (rr *RoundRobin) weightGcd() int { - divisor := -1 - for _, s := range rr.servers { - if divisor == -1 { - divisor = s.weight - } else { - divisor = gcd(divisor, s.weight) - } - } - return divisor -} - -func gcd(a, b int) int { - for b != 0 { - a, b = b, a%b - } - return a -} - -// ServerOption provides various options for server, e.g. weight -type ServerOption func(*server) error - -// LBOption provides options for load balancer -type LBOption func(*RoundRobin) error - -// Set additional parameters for the server can be supplied when adding server -type server struct { - url *url.URL - // Relative weight for the enpoint to other enpoints in the load balancer - weight int -} - -const defaultWeight = 1 - -func sameURL(a, b *url.URL) bool { - return a.Path == b.Path && a.Host == b.Host && a.Scheme == b.Scheme -} - -type balancerHandler interface { - Servers() []*url.URL - ServeHTTP(w http.ResponseWriter, req *http.Request) - ServerWeight(u *url.URL) (int, bool) - RemoveServer(u *url.URL) error - UpsertServer(u *url.URL, options ...ServerOption) error - NextServer() (*url.URL, error) - Next() http.Handler -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rr_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rr_test.go deleted file mode 100644 index 494c04644..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/roundrobin/rr_test.go +++ /dev/null @@ -1,216 +0,0 @@ -package roundrobin - -import ( - "net/http" - "net/http/httptest" - "testing" - - "github.com/mailgun/oxy/forward" - "github.com/mailgun/oxy/testutils" - "github.com/mailgun/oxy/utils" - - . 
"gopkg.in/check.v1" -) - -func TestRR(t *testing.T) { TestingT(t) } - -type RRSuite struct{} - -var _ = Suite(&RRSuite{}) - -func (s *RRSuite) TestNoServers(c *C) { - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - proxy := httptest.NewServer(lb) - defer proxy.Close() - - re, _, err := testutils.Get(proxy.URL) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusInternalServerError) -} - -func (s *RRSuite) TestRemoveBadServer(c *C) { - lb, err := New(nil) - c.Assert(err, IsNil) - - c.Assert(lb.RemoveServer(testutils.ParseURI("http://google.com")), NotNil) -} - -func (s *RRSuite) TestCustomErrHandler(c *C) { - errHandler := utils.ErrorHandlerFunc(func(w http.ResponseWriter, req *http.Request, err error) { - w.WriteHeader(http.StatusTeapot) - w.Write([]byte(http.StatusText(http.StatusTeapot))) - }) - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd, ErrorHandler(errHandler)) - c.Assert(err, IsNil) - - proxy := httptest.NewServer(lb) - defer proxy.Close() - - re, _, err := testutils.Get(proxy.URL) - c.Assert(err, IsNil) - c.Assert(re.StatusCode, Equals, http.StatusTeapot) -} - -func (s *RRSuite) TestOneServer(c *C) { - a := testutils.NewResponder("a") - defer a.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - lb.UpsertServer(testutils.ParseURI(a.URL)) - - proxy := httptest.NewServer(lb) - defer proxy.Close() - - c.Assert(seq(c, proxy.URL, 3), DeepEquals, []string{"a", "a", "a"}) -} - -func (s *RRSuite) TestSimple(c *C) { - a := testutils.NewResponder("a") - defer a.Close() - - b := testutils.NewResponder("b") - defer b.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - lb.UpsertServer(testutils.ParseURI(a.URL)) - lb.UpsertServer(testutils.ParseURI(b.URL)) - - proxy := httptest.NewServer(lb) - defer proxy.Close() - - c.Assert(seq(c, proxy.URL, 3), DeepEquals, []string{"a", "b", "a"}) -} - -func (s *RRSuite) TestRemoveServer(c *C) { - a := testutils.NewResponder("a") - defer a.Close() - - b := testutils.NewResponder("b") - defer b.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - lb.UpsertServer(testutils.ParseURI(a.URL)) - lb.UpsertServer(testutils.ParseURI(b.URL)) - - proxy := httptest.NewServer(lb) - defer proxy.Close() - - c.Assert(seq(c, proxy.URL, 3), DeepEquals, []string{"a", "b", "a"}) - - lb.RemoveServer(testutils.ParseURI(a.URL)) - - c.Assert(seq(c, proxy.URL, 3), DeepEquals, []string{"b", "b", "b"}) -} - -func (s *RRSuite) TestUpsertSame(c *C) { - a := testutils.NewResponder("a") - defer a.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - c.Assert(lb.UpsertServer(testutils.ParseURI(a.URL)), IsNil) - c.Assert(lb.UpsertServer(testutils.ParseURI(a.URL)), IsNil) - - proxy := httptest.NewServer(lb) - defer proxy.Close() - - c.Assert(seq(c, proxy.URL, 3), DeepEquals, []string{"a", "a", "a"}) -} - -func (s *RRSuite) TestUpsertWeight(c *C) { - a := testutils.NewResponder("a") - defer a.Close() - - b := testutils.NewResponder("b") - defer b.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - c.Assert(lb.UpsertServer(testutils.ParseURI(a.URL)), IsNil) - c.Assert(lb.UpsertServer(testutils.ParseURI(b.URL)), IsNil) - - proxy := httptest.NewServer(lb) - defer proxy.Close() - - c.Assert(seq(c, proxy.URL, 
3), DeepEquals, []string{"a", "b", "a"}) - - c.Assert(lb.UpsertServer(testutils.ParseURI(b.URL), Weight(3)), IsNil) - - c.Assert(seq(c, proxy.URL, 4), DeepEquals, []string{"b", "b", "a", "b"}) -} - -func (s *RRSuite) TestWeighted(c *C) { - a := testutils.NewResponder("a") - defer a.Close() - - b := testutils.NewResponder("b") - defer b.Close() - - fwd, err := forward.New() - c.Assert(err, IsNil) - - lb, err := New(fwd) - c.Assert(err, IsNil) - - lb.UpsertServer(testutils.ParseURI(a.URL), Weight(3)) - lb.UpsertServer(testutils.ParseURI(b.URL), Weight(2)) - - proxy := httptest.NewServer(lb) - defer proxy.Close() - - c.Assert(seq(c, proxy.URL, 6), DeepEquals, []string{"a", "a", "b", "a", "b", "a"}) - - w, ok := lb.ServerWeight(testutils.ParseURI(a.URL)) - c.Assert(w, Equals, 3) - c.Assert(ok, Equals, true) - - w, ok = lb.ServerWeight(testutils.ParseURI(b.URL)) - c.Assert(w, Equals, 2) - c.Assert(ok, Equals, true) - - w, ok = lb.ServerWeight(testutils.ParseURI("http://caramba:4000")) - c.Assert(w, Equals, -1) - c.Assert(ok, Equals, false) -} - -func seq(c *C, url string, repeat int) []string { - out := []string{} - for i := 0; i < repeat; i++ { - _, body, err := testutils.Get(url) - c.Assert(err, IsNil) - out = append(out, string(body)) - } - return out -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/auth.go b/Godeps/_workspace/src/github.com/mailgun/oxy/utils/auth.go deleted file mode 100644 index 9017d8ec9..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/auth.go +++ /dev/null @@ -1,41 +0,0 @@ -package utils - -import ( - "encoding/base64" - "fmt" - "strings" -) - -type BasicAuth struct { - Username string - Password string -} - -func (ba *BasicAuth) String() string { - encoded := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.Username, ba.Password))) - return fmt.Sprintf("Basic %s", encoded) -} - -func ParseAuthHeader(header string) (*BasicAuth, error) { - values := strings.Fields(header) - if len(values) != 2 { - return nil, fmt.Errorf(fmt.Sprintf("Failed to parse header '%s'", header)) - } - - auth_type := strings.ToLower(values[0]) - if auth_type != "basic" { - return nil, fmt.Errorf("Expected basic auth type, got '%s'", auth_type) - } - - encoded_string := values[1] - decoded_string, err := base64.StdEncoding.DecodeString(encoded_string) - if err != nil { - return nil, fmt.Errorf("Failed to parse header '%s', base64 failed: %s", header, err) - } - - values = strings.SplitN(string(decoded_string), ":", 2) - if len(values) != 2 { - return nil, fmt.Errorf("Failed to parse header '%s', expected separator ':'", header) - } - return &BasicAuth{Username: values[0], Password: values[1]}, nil -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/auth_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/utils/auth_test.go deleted file mode 100644 index 069040d08..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/auth_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package utils - -import ( - . 
"gopkg.in/check.v1" -) - -type AuthSuite struct { -} - -var _ = Suite(&AuthSuite{}) - -//Just to make sure we don't panic, return err and not -//username and pass and cover the function -func (s *AuthSuite) TestParseBadHeaders(c *C) { - headers := []string{ - //just empty string - "", - //missing auth type - "justplainstring", - //unknown auth type - "Whut justplainstring", - //invalid base64 - "Basic Shmasic", - //random encoded string - "Basic YW55IGNhcm5hbCBwbGVhcw==", - } - for _, h := range headers { - _, err := ParseAuthHeader(h) - c.Assert(err, NotNil) - } -} - -//Just to make sure we don't panic, return err and not -//username and pass and cover the function -func (s *AuthSuite) TestParseSuccess(c *C) { - headers := []struct { - Header string - Expected BasicAuth - }{ - { - "Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==", - BasicAuth{Username: "Aladdin", Password: "open sesame"}, - }, - // Make sure that String() produces valid header - { - (&BasicAuth{Username: "Alice", Password: "Here's bob"}).String(), - BasicAuth{Username: "Alice", Password: "Here's bob"}, - }, - //empty pass - { - "Basic QWxhZGRpbjo=", - BasicAuth{Username: "Aladdin", Password: ""}, - }, - } - for _, h := range headers { - request, err := ParseAuthHeader(h.Header) - c.Assert(err, IsNil) - c.Assert(request.Username, Equals, h.Expected.Username) - c.Assert(request.Password, Equals, h.Expected.Password) - - } -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/handler.go b/Godeps/_workspace/src/github.com/mailgun/oxy/utils/handler.go deleted file mode 100644 index 003fc0319..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/handler.go +++ /dev/null @@ -1,38 +0,0 @@ -package utils - -import ( - "io" - "net" - "net/http" -) - -type ErrorHandler interface { - ServeHTTP(w http.ResponseWriter, req *http.Request, err error) -} - -var DefaultHandler ErrorHandler = &StdHandler{} - -type StdHandler struct { -} - -func (e *StdHandler) ServeHTTP(w http.ResponseWriter, req *http.Request, err error) { - statusCode := http.StatusInternalServerError - if e, ok := err.(net.Error); ok { - if e.Timeout() { - statusCode = http.StatusGatewayTimeout - } else { - statusCode = http.StatusBadGateway - } - } else if err == io.EOF { - statusCode = http.StatusBadGateway - } - w.WriteHeader(statusCode) - w.Write([]byte(http.StatusText(statusCode))) -} - -type ErrorHandlerFunc func(http.ResponseWriter, *http.Request, error) - -// ServeHTTP calls f(w, r). -func (f ErrorHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request, err error) { - f(w, r, err) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/handler_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/utils/handler_test.go deleted file mode 100644 index 9044100b1..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/handler_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package utils - -import ( - "bytes" - "net/http" - "net/http/httptest" - "strings" - - . 
"gopkg.in/check.v1" -) - -type UtilsSuite struct{} - -var _ = Suite(&UtilsSuite{}) - -func (s *UtilsSuite) TestDefaultHandlerErrors(c *C) { - srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - h := w.(http.Hijacker) - conn, _, _ := h.Hijack() - conn.Close() - })) - defer srv.Close() - - request, err := http.NewRequest("GET", srv.URL, strings.NewReader("")) - c.Assert(err, IsNil) - - _, err = http.DefaultTransport.RoundTrip(request) - - w := NewBufferWriter(NopWriteCloser(&bytes.Buffer{})) - - DefaultHandler.ServeHTTP(w, nil, err) - - c.Assert(w.Code, Equals, http.StatusBadGateway) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/logging.go b/Godeps/_workspace/src/github.com/mailgun/oxy/utils/logging.go deleted file mode 100644 index 7b036b7cd..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/logging.go +++ /dev/null @@ -1,86 +0,0 @@ -package utils - -import ( - "io" - "log" -) - -var NullLogger Logger = &NOPLogger{} - -// Logger defines a simple logging interface -type Logger interface { - Infof(format string, args ...interface{}) - Warningf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) -} - -type FileLogger struct { - info *log.Logger - warn *log.Logger - error *log.Logger -} - -func NewFileLogger(w io.Writer, lvl LogLevel) *FileLogger { - l := &FileLogger{} - flag := log.Ldate | log.Ltime | log.Lmicroseconds - if lvl <= INFO { - l.info = log.New(w, "INFO: ", flag) - } - if lvl <= WARN { - l.warn = log.New(w, "WARN: ", flag) - } - if lvl <= ERROR { - l.error = log.New(w, "ERR: ", flag) - } - return l -} - -func (f *FileLogger) Infof(format string, args ...interface{}) { - if f.info == nil { - return - } - f.info.Printf(format, args...) -} - -func (f *FileLogger) Warningf(format string, args ...interface{}) { - if f.warn == nil { - return - } - f.warn.Printf(format, args...) -} - -func (f *FileLogger) Errorf(format string, args ...interface{}) { - if f.error == nil { - return - } - f.error.Printf(format, args...) -} - -type NOPLogger struct { -} - -func (*NOPLogger) Infof(format string, args ...interface{}) { - -} -func (*NOPLogger) Warningf(format string, args ...interface{}) { -} - -func (*NOPLogger) Errorf(format string, args ...interface{}) { -} - -func (*NOPLogger) Info(string) { - -} -func (*NOPLogger) Warning(string) { -} - -func (*NOPLogger) Error(string) { -} - -type LogLevel int - -const ( - INFO = iota - WARN - ERROR -) diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/netutils.go b/Godeps/_workspace/src/github.com/mailgun/oxy/utils/netutils.go deleted file mode 100644 index 12542e3d3..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/netutils.go +++ /dev/null @@ -1,121 +0,0 @@ -package utils - -import ( - "io" - "net/http" - "net/url" -) - -// ProxyWriter helps to capture response headers and status code -// from the ServeHTTP. It can be safely passed to ServeHTTP handler, -// wrapping the real response writer. 
-type ProxyWriter struct { - W http.ResponseWriter - Code int -} - -func (p *ProxyWriter) StatusCode() int { - if p.Code == 0 { - // per contract standard lib will set this to http.StatusOK if not set - // by user, here we avoid the confusion by mirroring this logic - return http.StatusOK - } - return p.Code -} - -func (p *ProxyWriter) Header() http.Header { - return p.W.Header() -} - -func (p *ProxyWriter) Write(buf []byte) (int, error) { - return p.W.Write(buf) -} - -func (p *ProxyWriter) WriteHeader(code int) { - p.Code = code - p.W.WriteHeader(code) -} - -func (p *ProxyWriter) Flush() { - if f, ok := p.W.(http.Flusher); ok { - f.Flush() - } -} - -func NewBufferWriter(w io.WriteCloser) *BufferWriter { - return &BufferWriter{ - W: w, - H: make(http.Header), - } -} - -type BufferWriter struct { - H http.Header - Code int - W io.WriteCloser -} - -func (b *BufferWriter) Close() error { - return b.W.Close() -} - -func (b *BufferWriter) Header() http.Header { - return b.H -} - -func (b *BufferWriter) Write(buf []byte) (int, error) { - return b.W.Write(buf) -} - -// WriteHeader sets rw.Code. -func (b *BufferWriter) WriteHeader(code int) { - b.Code = code -} - -type nopWriteCloser struct { - io.Writer -} - -func (*nopWriteCloser) Close() error { return nil } - -// NopCloser returns a WriteCloser with a no-op Close method wrapping -// the provided Writer w. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// CopyURL provides update safe copy by avoiding shallow copying User field -func CopyURL(i *url.URL) *url.URL { - out := *i - if i.User != nil { - out.User = &(*i.User) - } - return &out -} - -// CopyHeaders copies http headers from source to destination, it -// does not overide, but adds multiple headers -func CopyHeaders(dst, src http.Header) { - for k, vv := range src { - for _, v := range vv { - dst.Add(k, v) - } - } -} - -// HasHeaders determines whether any of the header names is present in the http headers -func HasHeaders(names []string, headers http.Header) bool { - for _, h := range names { - if headers.Get(h) != "" { - return true - } - } - return false -} - -// RemoveHeaders removes the header with the given names from the headers map -func RemoveHeaders(headers http.Header, names ...string) { - for _, h := range names { - headers.Del(h) - } -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/netutils_test.go b/Godeps/_workspace/src/github.com/mailgun/oxy/utils/netutils_test.go deleted file mode 100644 index 795cdcc23..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/netutils_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package utils - -import ( - "net/http" - "net/url" - "testing" - - . 
"gopkg.in/check.v1" -) - -func TestUtils(t *testing.T) { TestingT(t) } - -type NetUtilsSuite struct{} - -var _ = Suite(&NetUtilsSuite{}) - -// Make sure copy does it right, so the copied url -// is safe to alter without modifying the other -func (s *NetUtilsSuite) TestCopyUrl(c *C) { - urlA := &url.URL{ - Scheme: "http", - Host: "localhost:5000", - Path: "/upstream", - Opaque: "opaque", - RawQuery: "a=1&b=2", - Fragment: "#hello", - User: &url.Userinfo{}, - } - urlB := CopyURL(urlA) - c.Assert(urlB, DeepEquals, urlA) - urlB.Scheme = "https" - c.Assert(urlB, Not(DeepEquals), urlA) -} - -// Make sure copy headers is not shallow and copies all headers -func (s *NetUtilsSuite) TestCopyHeaders(c *C) { - source, destination := make(http.Header), make(http.Header) - source.Add("a", "b") - source.Add("c", "d") - - CopyHeaders(destination, source) - - c.Assert(destination.Get("a"), Equals, "b") - c.Assert(destination.Get("c"), Equals, "d") - - // make sure that altering source does not affect the destination - source.Del("a") - c.Assert(source.Get("a"), Equals, "") - c.Assert(destination.Get("a"), Equals, "b") -} - -func (s *NetUtilsSuite) TestHasHeaders(c *C) { - source := make(http.Header) - source.Add("a", "b") - source.Add("c", "d") - c.Assert(HasHeaders([]string{"a", "f"}, source), Equals, true) - c.Assert(HasHeaders([]string{"i", "j"}, source), Equals, false) -} - -func (s *NetUtilsSuite) TestRemoveHeaders(c *C) { - source := make(http.Header) - source.Add("a", "b") - source.Add("a", "m") - source.Add("c", "d") - RemoveHeaders(source, "a") - c.Assert(source.Get("a"), Equals, "") - c.Assert(source.Get("c"), Equals, "d") -} diff --git a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/source.go b/Godeps/_workspace/src/github.com/mailgun/oxy/utils/source.go deleted file mode 100644 index 4ed89e13b..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/oxy/utils/source.go +++ /dev/null @@ -1,57 +0,0 @@ -package utils - -import ( - "fmt" - "net/http" - "strings" -) - -// ExtractSource extracts the source from the request, e.g. that may be client ip, or particular header that -// identifies the source. 
amount stands for amount of connections the source consumes, usually 1 for connection limiters -// error should be returned when source can not be identified -type SourceExtractor interface { - Extract(req *http.Request) (token string, amount int64, err error) -} - -type ExtractorFunc func(req *http.Request) (token string, amount int64, err error) - -func (f ExtractorFunc) Extract(req *http.Request) (string, int64, error) { - return f(req) -} - -type ExtractSource func(req *http.Request) - -func NewExtractor(variable string) (SourceExtractor, error) { - if variable == "client.ip" { - return ExtractorFunc(extractClientIP), nil - } - if variable == "request.host" { - return ExtractorFunc(extractHost), nil - } - if strings.HasPrefix(variable, "request.header.") { - header := strings.TrimPrefix(variable, "request.header.") - if len(header) == 0 { - return nil, fmt.Errorf("Wrong header: %s", header) - } - return makeHeaderExtractor(header), nil - } - return nil, fmt.Errorf("Unsupported limiting variable: '%s'", variable) -} - -func extractClientIP(req *http.Request) (string, int64, error) { - vals := strings.SplitN(req.RemoteAddr, ":", 2) - if len(vals[0]) == 0 { - return "", 0, fmt.Errorf("Failed to parse client IP: %v", req.RemoteAddr) - } - return vals[0], 1, nil -} - -func extractHost(req *http.Request) (string, int64, error) { - return req.Host, 1, nil -} - -func makeHeaderExtractor(header string) SourceExtractor { - return ExtractorFunc(func(req *http.Request) (string, int64, error) { - return req.Header.Get(header), 1, nil - }) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/predicate/.gitignore b/Godeps/_workspace/src/github.com/mailgun/predicate/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/predicate/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/Godeps/_workspace/src/github.com/mailgun/predicate/LICENSE b/Godeps/_workspace/src/github.com/mailgun/predicate/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/predicate/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/Godeps/_workspace/src/github.com/mailgun/predicate/Makefile b/Godeps/_workspace/src/github.com/mailgun/predicate/Makefile deleted file mode 100644 index 824d89a1c..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/predicate/Makefile +++ /dev/null @@ -1,13 +0,0 @@ - -test: clean - go test -v ./... -cover - -clean: - find . -name flymake_* -delete - -cover: clean - go test -v . 
-coverprofile=/tmp/coverage.out - go tool cover -html=/tmp/coverage.out - -sloccount: - find . -name "*.go" -print0 | xargs -0 wc -l diff --git a/Godeps/_workspace/src/github.com/mailgun/predicate/README.md b/Godeps/_workspace/src/github.com/mailgun/predicate/README.md deleted file mode 100644 index 736f1c6ba..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/predicate/README.md +++ /dev/null @@ -1,53 +0,0 @@ -Predicate -========= - -Predicate package used to create interpreted mini languages with Go syntax - mostly to define -various predicates for configuration, e.g. - -``` -Latency() > 40 || ErrorRate() > 0.5. -``` - -Here's an example of fully functional predicate language to deal with division remainders: - -```go -// takes number and returns true or false -type numberPredicate func(v int) bool - -// Converts one number to another -type numberMapper func(v int) int - -// Function that creates predicate to test if the remainder is 0 -func divisibleBy(divisor int) numberPredicate { - return func(v int) bool { - return v%divisor == 0 - } -} - -// Function - logical operator AND that combines predicates -func numberAND(a, b numberPredicate) numberPredicate { - return func(v int) bool { - return a(v) && b(v) - } -} - -func main(){ - // Create a new parser and define the supported operators and methods - p, err := NewParser(Def{ - Operators: Operators{ - AND: numberAND, - }, - Functions: map[string]interface{}{ - "DivisibleBy": divisibleBy, - }, - }) - - pr, err := p.Parse("DivisibleBy(2) && DivisibleBy(3)") - if err == nil { - fmt.Fatalf("Error: %v", err) - } - pr.(numberPredicate)(2) // false - pr.(numberPredicate)(3) // false - pr.(numberPredicate)(6) // true -} -``` diff --git a/Godeps/_workspace/src/github.com/mailgun/predicate/parse.go b/Godeps/_workspace/src/github.com/mailgun/predicate/parse.go deleted file mode 100644 index 7ff673070..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/predicate/parse.go +++ /dev/null @@ -1,181 +0,0 @@ -package predicate - -import ( - "fmt" - "go/ast" - "go/parser" - "go/token" - "reflect" - "strconv" -) - -func NewParser(d Def) (Parser, error) { - return &predicateParser{d: d}, nil -} - -type predicateParser struct { - d Def -} - -func (p *predicateParser) Parse(in string) (interface{}, error) { - expr, err := parser.ParseExpr(in) - if err != nil { - return nil, err - } - return p.parseNode(expr) -} - -func (p *predicateParser) parseNode(node ast.Node) (interface{}, error) { - switch n := node.(type) { - case *ast.BasicLit: - return literalToValue(n) - case *ast.BinaryExpr: - x, err := p.parseNode(n.X) - if err != nil { - return nil, err - } - y, err := p.parseNode(n.Y) - if err != nil { - return nil, err - } - return p.joinPredicates(n.Op, x, y) - case *ast.CallExpr: - // We expect function that will return predicate - name, err := getIdentifier(n.Fun) - if err != nil { - return nil, err - } - fn, err := p.getFunction(name) - if err != nil { - return nil, err - } - arguments, err := collectLiterals(n.Args) - if err != nil { - return nil, err - } - return callFunction(fn, arguments) - case *ast.ParenExpr: - return p.parseNode(n.X) - } - return nil, fmt.Errorf("unsupported %T", node) -} - -func (p *predicateParser) getFunction(name string) (interface{}, error) { - v, ok := p.d.Functions[name] - if !ok { - return nil, fmt.Errorf("unsupported function: %s", name) - } - return v, nil -} - -func (p *predicateParser) joinPredicates(op token.Token, a, b interface{}) (interface{}, error) { - joinFn, err := p.getJoinFunction(op) - if err != nil 
{ - return nil, err - } - return callFunction(joinFn, []interface{}{a, b}) -} - -func (p *predicateParser) getJoinFunction(op token.Token) (interface{}, error) { - var fn interface{} - switch op { - case token.LAND: - fn = p.d.Operators.AND - case token.LOR: - fn = p.d.Operators.OR - case token.GTR: - fn = p.d.Operators.GT - case token.GEQ: - fn = p.d.Operators.GE - case token.LSS: - fn = p.d.Operators.LT - case token.LEQ: - fn = p.d.Operators.LE - case token.EQL: - fn = p.d.Operators.EQ - case token.NEQ: - fn = p.d.Operators.NEQ - } - if fn == nil { - return nil, fmt.Errorf("%v is not supported", op) - } - return fn, nil -} - -func getIdentifier(node ast.Node) (string, error) { - id, ok := node.(*ast.Ident) - if !ok { - return "", fmt.Errorf("expected identifier, got: %T", node) - } - return id.Name, nil -} - -func collectLiterals(nodes []ast.Expr) ([]interface{}, error) { - out := make([]interface{}, len(nodes)) - for i, n := range nodes { - l, ok := n.(*ast.BasicLit) - if !ok { - return nil, fmt.Errorf("expected literal, got %T", n) - } - val, err := literalToValue(l) - if err != nil { - return nil, err - } - out[i] = val - } - return out, nil -} - -func literalToValue(a *ast.BasicLit) (interface{}, error) { - switch a.Kind { - case token.FLOAT: - value, err := strconv.ParseFloat(a.Value, 64) - if err != nil { - return nil, fmt.Errorf("failed to parse argument: %s, error: %s", a.Value, err) - } - return value, nil - case token.INT: - value, err := strconv.Atoi(a.Value) - if err != nil { - return nil, fmt.Errorf("failed to parse argument: %s, error: %s", a.Value, err) - } - return value, nil - case token.STRING: - value, err := strconv.Unquote(a.Value) - if err != nil { - return nil, fmt.Errorf("failed to parse argument: %s, error: %s", a.Value, err) - } - return value, nil - } - return nil, fmt.Errorf("unsupported function argument type: '%v'", a.Kind) -} - -func callFunction(f interface{}, args []interface{}) (v interface{}, err error) { - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("%s", r) - } - }() - arguments := make([]reflect.Value, len(args)) - for i, a := range args { - arguments[i] = reflect.ValueOf(a) - } - fn := reflect.ValueOf(f) - - ret := fn.Call(arguments) - switch len(ret) { - case 1: - return ret[0].Interface(), nil - case 2: - v, e := ret[0].Interface(), ret[1].Interface() - if e == nil { - return v, nil - } - err, ok := e.(error) - if !ok { - return nil, fmt.Errorf("expected error as a second return value, got %T", e) - } - return v, err - } - return nil, fmt.Errorf("expected at least one return argument for '%v'", fn) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/predicate/predicate.go b/Godeps/_workspace/src/github.com/mailgun/predicate/predicate.go deleted file mode 100644 index f9e22ce67..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/predicate/predicate.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Predicate package used to create interpreted mini languages with Go syntax - mostly to define -various predicates for configuration, e.g. Latency() > 40 || ErrorRate() > 0.5. 
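The usage example in the predicate README above checks err == nil before calling fmt.Fatalf, which does not exist in the standard fmt package; the intent is plainly err != nil with log.Fatalf. A corrected sketch against the NewParser, Def and Operators API shown above, assuming the package imports as github.com/mailgun/predicate as it is vendored here:

```go
// Sketch only: the README example above with the error checks fixed.
package main

import (
	"fmt"
	"log"

	"github.com/mailgun/predicate" // path as vendored in this tree
)

// numberPredicate takes a number and returns true or false.
type numberPredicate func(v int) bool

// divisibleBy builds a predicate that tests whether the remainder is zero.
func divisibleBy(divisor int) numberPredicate {
	return func(v int) bool { return v%divisor == 0 }
}

// numberAND is the logical AND operator over predicates.
func numberAND(a, b numberPredicate) numberPredicate {
	return func(v int) bool { return a(v) && b(v) }
}

func main() {
	p, err := predicate.NewParser(predicate.Def{
		Operators: predicate.Operators{AND: numberAND},
		Functions: map[string]interface{}{"DivisibleBy": divisibleBy},
	})
	if err != nil {
		log.Fatalf("parser error: %v", err)
	}

	pr, err := p.Parse("DivisibleBy(2) && DivisibleBy(3)")
	if err != nil {
		log.Fatalf("parse error: %v", err)
	}

	fmt.Println(pr.(numberPredicate)(2)) // false
	fmt.Println(pr.(numberPredicate)(3)) // false
	fmt.Println(pr.(numberPredicate)(6)) // true
}
```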
- -Here's an example of fully functional predicate language to deal with division remainders: - - // takes number and returns true or false - type numberPredicate func(v int) bool - - // Converts one number to another - type numberMapper func(v int) int - - // Function that creates predicate to test if the remainder is 0 - func divisibleBy(divisor int) numberPredicate { - return func(v int) bool { - return v%divisor == 0 - } - } - - // Function - logical operator AND that combines predicates - func numberAND(a, b numberPredicate) numberPredicate { - return func(v int) bool { - return a(v) && b(v) - } - } - - p, err := NewParser(Def{ - Operators: Operators{ - AND: numberAND, - }, - Functions: map[string]interface{}{ - "DivisibleBy": divisibleBy, - }, - }) - - pr, err := p.Parse("DivisibleBy(2) && DivisibleBy(3)") - if err == nil { - fmt.Fatalf("Error: %v", err) - } - pr.(numberPredicate)(2) // false - pr.(numberPredicate)(3) // false - pr.(numberPredicate)(6) // true -*/ -package predicate - -// Def contains supported operators (e.g. LT, GT) and functions passed in as a map. -type Def struct { - Operators Operators - // Function matching is case sensitive, e.g. Len is different from len - Functions map[string]interface{} -} - -// Operators contain functions for equality and logical comparison. -type Operators struct { - EQ interface{} - NEQ interface{} - - LT interface{} - GT interface{} - - LE interface{} - GE interface{} - - OR interface{} - AND interface{} -} - -// Parser takes the string with expression and calls the operators and functions. -type Parser interface { - Parse(string) (interface{}, error) -} diff --git a/Godeps/_workspace/src/github.com/mailgun/timetools/LICENSE b/Godeps/_workspace/src/github.com/mailgun/timetools/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/timetools/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/Godeps/_workspace/src/github.com/mailgun/timetools/README.md b/Godeps/_workspace/src/github.com/mailgun/timetools/README.md deleted file mode 100644 index 107bc54e1..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/timetools/README.md +++ /dev/null @@ -1,4 +0,0 @@ -timetools -========= - -Go library with various time utilities used at Mailgun. diff --git a/Godeps/_workspace/src/github.com/mailgun/timetools/provider.go b/Godeps/_workspace/src/github.com/mailgun/timetools/provider.go deleted file mode 100644 index 82245941c..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/timetools/provider.go +++ /dev/null @@ -1,104 +0,0 @@ -package timetools - -import ( - "sync" - "time" -) - -// TimeProvider is an interface we use to mock time in tests. -type TimeProvider interface { - UtcNow() time.Time - Sleep(time.Duration) - After(time.Duration) <-chan time.Time -} - -// RealTime is a real clock time, used in production. 
-type RealTime struct { -} - -func (*RealTime) UtcNow() time.Time { - return time.Now().UTC() -} - -func (*RealTime) Sleep(d time.Duration) { - time.Sleep(d) -} - -func (*RealTime) After(d time.Duration) <-chan time.Time { - return time.After(d) -} - -// FreezedTime is manually controlled time for use in tests. -type FreezedTime struct { - CurrentTime time.Time -} - -func (t *FreezedTime) UtcNow() time.Time { - return t.CurrentTime -} - -func (t *FreezedTime) Sleep(d time.Duration) { - t.CurrentTime = t.CurrentTime.Add(d) -} - -func (t *FreezedTime) After(d time.Duration) <-chan time.Time { - t.Sleep(d) - c := make(chan time.Time, 1) - c <- t.CurrentTime - return c -} - -type sleepableTime struct { - currentTime time.Time - waiters map[time.Time][]chan time.Time - mu sync.Mutex -} - -// SleepProvider returns a TimeProvider that has good fakes for -// time.Sleep and time.After. Both functions will behave as if -// time is frozen until you call AdvanceTimeBy, at which point -// any calls to time.Sleep that should return do return and -// any ticks from time.After that should happen do happen. -func SleepProvider(currentTime time.Time) TimeProvider { - return &sleepableTime{ - currentTime: currentTime, - waiters: make(map[time.Time][]chan time.Time), - } -} - -func (t *sleepableTime) UtcNow() time.Time { - return t.currentTime -} - -func (t *sleepableTime) Sleep(d time.Duration) { - <-t.After(d) -} - -func (t *sleepableTime) After(d time.Duration) <-chan time.Time { - t.mu.Lock() - defer t.mu.Unlock() - - c := make(chan time.Time, 1) - until := t.currentTime.Add(d) - t.waiters[until] = append(t.waiters[until], c) - return c -} - -// AdvanceTimeBy simulates advancing time by some time.Duration d. -// This function panics if st is not the result of a call to -// SleepProvider. 
-func AdvanceTimeBy(st TimeProvider, d time.Duration) { - t := st.(*sleepableTime) - t.mu.Lock() - defer t.mu.Unlock() - - t.currentTime = t.currentTime.Add(d) - for k, v := range t.waiters { - if k.Before(t.currentTime) { - for _, c := range v { - c <- t.currentTime - } - delete(t.waiters, k) - } - } -} diff --git a/Godeps/_workspace/src/github.com/mailgun/timetools/provider_test.go b/Godeps/_workspace/src/github.com/mailgun/timetools/provider_test.go deleted file mode 100644 index fac564ff6..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/timetools/provider_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package timetools - -import ( - "fmt" - "testing" - "time" -) - -var _ = fmt.Printf // for testing - -func TestRealTimeUtcNow(t *testing.T) { - rt := RealTime{} - - rtNow := rt.UtcNow() - atNow := time.Now().UTC() - - // times shouldn't be exact - if rtNow.Equal(atNow) { - t.Errorf("rt.UtcNow() = time.Now.UTC(), %v = %v, should be slightly different", rtNow, atNow) - } - - rtNowPlusOne := atNow.Add(1 * time.Second) - rtNowMinusOne := atNow.Add(-1 * time.Second) - - // but should be pretty close - if atNow.After(rtNowPlusOne) || atNow.Before(rtNowMinusOne) { - t.Errorf("timedelta between rt.UtcNow() and time.Now.UTC() greater than 2 seconds, %v, %v", rtNow, atNow) - } -} - -func TestFreezeTimeUtcNow(t *testing.T) { - tm := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - ft := FreezedTime{tm} - - if !tm.Equal(ft.UtcNow()) { - t.Errorf("ft.UtcNow() != time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), %v, %v", tm, ft) - } -} - -func itTicks(c <-chan time.Time) bool { - select { - case <-c: - return true - case <-time.After(time.Millisecond): - return false - } -} - -func TestSleepableTime(t *testing.T) { - tm := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) - st := SleepProvider(tm) - - if !tm.Equal(st.UtcNow()) { - t.Errorf("st.UtcNow() != time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC), %v, %v", tm, st) - } - - // Check After with no AdvanceTimeBy - if itTicks(st.After(time.Nanosecond)) { - t.Error("Got tick from After before calling AdvanceTimeBy") - } - - // Check After with one call to AdvanceTimeBy - c0 := st.After(time.Hour) - AdvanceTimeBy(st, 2*time.Hour) - if !itTicks(c0) { - t.Error("Didn't get tick from After after calling AdvanceTimeBy") - } - - // Check After with multiple calls to AdvanceTimeBy - c0 = st.After(time.Hour) - AdvanceTimeBy(st, 20*time.Minute) - if itTicks(c0) { - t.Error("Got tick from After before we AdvanceTimeBy'd enough") - } - AdvanceTimeBy(st, 20*time.Minute) - if itTicks(c0) { - t.Error("Got tick from After before we AdvanceTimeBy'd enough") - } - AdvanceTimeBy(st, 40*time.Minute) - if !itTicks(c0) { - t.Error("Didn't get tick from After after we AdvanceTimeBy'd enough") - } - - // Check Sleep with no AdvanceTimeBy - c1 := make(chan time.Time) - go func() { - st.Sleep(time.Nanosecond) - c1 <- st.UtcNow() - }() - if itTicks(c1) { - t.Error("Sleep returned before we called AdvanceTimeBy") - } -} diff --git a/Godeps/_workspace/src/github.com/mailgun/timetools/rfc2822time.go b/Godeps/_workspace/src/github.com/mailgun/timetools/rfc2822time.go deleted file mode 100644 index 604f94a85..000000000 --- a/Godeps/_workspace/src/github.com/mailgun/timetools/rfc2822time.go +++ /dev/null @@ -1,59 +0,0 @@ -package timetools - -import ( - "strconv" - "time" - - "gopkg.in/mgo.v2/bson" -) - -// We use RFC2822 format for timestamps everywhere ('Thu, 13 Oct 2011 18:02:00 GMT'), but -// by default Go's JSON package uses another format 
when decoding/encoding timestamps. -// That's why we declare our own timestamp type and implement its marshal/unmarshal methods -// so JSON package decodes/encodes it properly. -type RFC2822Time time.Time - -func NewRFC2822Time(timestamp int64) RFC2822Time { - return RFC2822Time(time.Unix(timestamp, 0).UTC()) -} - -func (t RFC2822Time) Unix() int64 { - return time.Time(t).Unix() -} - -func (t RFC2822Time) IsZero() bool { - return time.Time(t).IsZero() -} - -func (t RFC2822Time) MarshalJSON() ([]byte, error) { - return []byte(strconv.Quote(time.Time(t).Format(time.RFC1123))), nil -} - -func (t *RFC2822Time) UnmarshalJSON(s []byte) error { - q, err := strconv.Unquote(string(s)) - if err != nil { - return err - } - if *(*time.Time)(t), err = time.Parse(time.RFC1123, q); err != nil { - return err - } - return nil -} - -func (t RFC2822Time) GetBSON() (interface{}, error) { - return time.Time(t), nil -} - -func (t *RFC2822Time) SetBSON(raw bson.Raw) error { - var result time.Time - err := raw.Unmarshal(&result) - if err != nil { - return err - } - *t = RFC2822Time(result) - return nil -} - -func (t RFC2822Time) String() string { - return time.Time(t).Format(time.RFC1123) -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/.travis.yml b/Godeps/_workspace/src/github.com/op/go-logging/.travis.yml deleted file mode 100644 index 70e012b81..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - tip diff --git a/Godeps/_workspace/src/github.com/op/go-logging/CONTRIBUTORS b/Godeps/_workspace/src/github.com/op/go-logging/CONTRIBUTORS deleted file mode 100644 index 958416ef1..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/CONTRIBUTORS +++ /dev/null @@ -1,5 +0,0 @@ -Alec Thomas -Guilhem Lettron -Ivan Daniluk -Nimi Wariboko Jr -Róbert Selvek diff --git a/Godeps/_workspace/src/github.com/op/go-logging/LICENSE b/Godeps/_workspace/src/github.com/op/go-logging/LICENSE deleted file mode 100644 index f1f6cfcef..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013 Örjan Persson. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Godeps/_workspace/src/github.com/op/go-logging/README.md b/Godeps/_workspace/src/github.com/op/go-logging/README.md deleted file mode 100644 index 566955085..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/README.md +++ /dev/null @@ -1,89 +0,0 @@ -## Golang logging library - -[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/op/go-logging) [![build](https://img.shields.io/travis/op/go-logging.svg?style=flat)](https://travis-ci.org/op/go-logging) - -Package logging implements a logging infrastructure for Go. Its output format -is customizable and supports different logging backends like syslog, file and -memory. Multiple backends can be utilized with different log levels per backend -and logger. - -## Example - -Let's have a look at an [example](examples/example.go) which demonstrates most -of the features found in this library. - -[![Example Output](examples/example.png)](examples/example.go) - -```go -package main - -import ( - "os" - - "github.com/op/go-logging" -) - -var log = logging.MustGetLogger("example") - -// Example format string. Everything except the message has a custom color -// which is dependent on the log level. Many fields have a custom output -// formatting too, eg. the time returns the hour down to the milli second. -var format = logging.MustStringFormatter( - "%{color}%{time:15:04:05.000} %{shortfunc} ▶ %{level:.4s} %{id:03x}%{color:reset} %{message}", -) - -// Password is just an example type implementing the Redactor interface. Any -// time this is logged, the Redacted() function will be called. -type Password string - -func (p Password) Redacted() interface{} { - return logging.Redact(string(p)) -} - -func main() { - // For demo purposes, create two backend for os.Stderr. - backend1 := logging.NewLogBackend(os.Stderr, "", 0) - backend2 := logging.NewLogBackend(os.Stderr, "", 0) - - // For messages written to backend2 we want to add some additional - // information to the output, including the used log level and the name of - // the function. - backend2Formatter := logging.NewBackendFormatter(backend2, format) - - // Only errors and more severe messages should be sent to backend1 - backend1Leveled := logging.AddModuleLevel(backend1) - backend1Leveled.SetLevel(logging.ERROR, "") - - // Set the backends to be used. - logging.SetBackend(backend1Leveled, backend2Formatter) - - log.Debug("debug %s", Password("secret")) - log.Info("info") - log.Notice("notice") - log.Warning("warning") - log.Error("err") - log.Critical("crit") -} -``` - -## Installing - -### Using *go get* - - $ go get github.com/op/go-logging - -After this command *go-logging* is ready to use. Its source will be in: - - $GOROOT/src/pkg/github.com/op/go-logging - -You can use `go get -u` to update the package. 
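
As a quick sanity check after installing, a minimal program along the following lines should compile and log to stderr. This is only a sketch: the module name `"smoketest"` and the log message are arbitrary, and it uses nothing beyond the `NewLogBackend`, `SetBackend` and `MustGetLogger` calls already shown in the example above.

```go
package main

import (
	"os"

	"github.com/op/go-logging"
)

func main() {
	// Send everything to stderr using the default formatter.
	backend := logging.NewLogBackend(os.Stderr, "", 0)
	logging.SetBackend(backend)

	log := logging.MustGetLogger("smoketest")
	log.Info("go-logging is installed and working")
}
```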
- -## Documentation - -For docs, see http://godoc.org/github.com/op/go-logging or run: - - $ godoc github.com/op/go-logging - -## Additional resources - -* [wslog](https://godoc.org/github.com/cryptix/go/logging/wslog) -- exposes log messages through a WebSocket. diff --git a/Godeps/_workspace/src/github.com/op/go-logging/backend.go b/Godeps/_workspace/src/github.com/op/go-logging/backend.go deleted file mode 100644 index 6cd589cab..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/backend.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -// defaultBackend is the backend used for all logging calls. -var defaultBackend LeveledBackend - -// Backend is the interface which a log backend need to implement to be able to -// be used as a logging backend. -type Backend interface { - Log(Level, int, *Record) error -} - -// Set backend replaces the backend currently set with the given new logging -// backend. -func SetBackend(backends ...Backend) LeveledBackend { - var backend Backend - if len(backends) == 1 { - backend = backends[0] - } else { - backend = MultiLogger(backends...) - } - - defaultBackend = AddModuleLevel(backend) - return defaultBackend -} - -// SetLevel sets the logging level for the specified module. The module -// corresponds to the string specified in GetLogger. -func SetLevel(level Level, module string) { - defaultBackend.SetLevel(level, module) -} - -// GetLevel returns the logging level for the specified module. -func GetLevel(module string) Level { - return defaultBackend.GetLevel(module) -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/example_test.go b/Godeps/_workspace/src/github.com/op/go-logging/example_test.go deleted file mode 100644 index deb8900e7..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/example_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package logging - -import "os" - -func Example() { - // This call is for testing purposes and will set the time to unix epoch. - InitForTesting(DEBUG) - - var log = MustGetLogger("example") - - // For demo purposes, create two backend for os.Stdout. - // - // os.Stderr should most likely be used in the real world but then the - // "Output:" check in this example would not work. - backend1 := NewLogBackend(os.Stdout, "", 0) - backend2 := NewLogBackend(os.Stdout, "", 0) - - // For messages written to backend2 we want to add some additional - // information to the output, including the used log level and the name of - // the function. - var format = MustStringFormatter( - "%{time:15:04:05.000} %{shortfunc} %{level:.1s} %{message}", - ) - backend2Formatter := NewBackendFormatter(backend2, format) - - // Only errors and more severe messages should be sent to backend2 - backend2Leveled := AddModuleLevel(backend2Formatter) - backend2Leveled.SetLevel(ERROR, "") - - // Set the backends to be used and the default level. 
- SetBackend(backend1, backend2Leveled) - - log.Debug("debug %s", "arg") - log.Error("error") - - // Output: - // debug arg - // error - // 00:00:00.000 Example E error -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/examples/example.go b/Godeps/_workspace/src/github.com/op/go-logging/examples/example.go deleted file mode 100644 index b2880eb34..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/examples/example.go +++ /dev/null @@ -1,49 +0,0 @@ -package main - -import ( - "os" - - "github.com/op/go-logging" -) - -var log = logging.MustGetLogger("example") - -// Example format string. Everything except the message has a custom color -// which is dependent on the log level. Many fields have a custom output -// formatting too, eg. the time returns the hour down to the milli second. -var format = logging.MustStringFormatter( - "%{color}%{time:15:04:05.000} %{shortfunc} ▶ %{level:.4s} %{id:03x}%{color:reset} %{message}", -) - -// Password is just an example type implementing the Redactor interface. Any -// time this is logged, the Redacted() function will be called. -type Password string - -func (p Password) Redacted() interface{} { - return logging.Redact(string(p)) -} - -func main() { - // For demo purposes, create two backend for os.Stderr. - backend1 := logging.NewLogBackend(os.Stderr, "", 0) - backend2 := logging.NewLogBackend(os.Stderr, "", 0) - - // For messages written to backend2 we want to add some additional - // information to the output, including the used log level and the name of - // the function. - backend2Formatter := logging.NewBackendFormatter(backend2, format) - - // Only errors and more severe messages should be sent to backend1 - backend1Leveled := logging.AddModuleLevel(backend1) - backend1Leveled.SetLevel(logging.ERROR, "") - - // Set the backends to be used. 
- logging.SetBackend(backend1Leveled, backend2Formatter) - - log.Debug("debug %s", Password("secret")) - log.Info("info") - log.Notice("notice") - log.Warning("warning") - log.Error("err") - log.Critical("crit") -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/examples/example.png b/Godeps/_workspace/src/github.com/op/go-logging/examples/example.png deleted file mode 100644 index ff3392b7a5b37a09d44db6a8f5ef293c5c610791..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17675 zcmZsD1yo#3mu+Lg0}1XB+}+*X-QC??g1fuBOK^7$?%ojGA-Kcq@1ObKyqPz(R?E75 zZ&%m3r>geZXP=HxkP}CQ!-fL@0Em(jB1!-Nm@(*mJPbJKS!ph(8T10{Aff3D0KlXC z^8f>6W@Ca5Lc2)Hib8L}Lw}`0xvKW70ssgBk|KgC9_tsnt{y6*E4kiRFJ|nQmqf9U z;3SZ(ge403ZOYXimIVsy9~ST>5{_X)QK*!c=s%H3FE3jXUE3f1{B6F&{Jtc!T#swH z<>NszY|G$%oXl{ao}BIqTIJ?~fC04}fdY6!P*WBFwbB4klK}&@3~*3mf`WvEG=T%1 zHHL}|YVnkWAB_h0n?6Ry@~OGzE!mrkQBXNc;K{K((FSCfJt%w#m$cq(R`x%uE7Es( z&90ox5y)9SlS{Vs{M#gIqjE7XgTBZS>{?8zr1p3@o=dqkSDv7(vh6g6#y%MBblr?% z-~-1}zjZ_(-C?4z(T$B{R*Xf3b2v7}hd}@U!=*E=`VTEue$f=b)}1(!h}g%%^)Q5N z_VwY&pDSgfc~7kE3Sy@%C<<-e{Md{oWw1&~x33l9Y0VyKtq4oCk3OFRDDGCO{^~Rg z5M;O-HneG#u))cx`#QV?c*;d~RIRF24h?g84Q+$H_%mv(eDs4AF2QNv{NG%JYLgdv zdYu(+I(TEg-Awa0vy`Cww`kUqe%-(tf(h*N`&?mynxb`N@+PPGm?Acb4Gqog~+hD9XksQAMWyvCMGK_(Umk04r_T8`LD(OuJXha zr>b}iGO5n?z+7;w5f?lbql0mZl+}MAuCyF2RU+34#bmnSoZT{U z>rtzMZtVFpW-`@D$s=<;A79V1#mxzIZ}Egg0QT#bna{C}pFiQ@7N&DzW)du%u!26K z$Yrnp{ED)t_pkD20$q!n==@{D)F>UXk?CM;F5M;~7NZf1jgo|?i|h1Zf}d*)1CVyW z@9D#7Fz(TtUae8FH7c1xNEG^MNYD0#YY=t)4Yn!SIzm`il4jIv0|sqPo7T} zE7fT-f4$Iex;J)bmhtB`S;#jz>pwh#RlO3)7k|thj~*5EDpc1)cGv#XYt&JvZ!lZU z7kJi6bDreld47BZPHk_TncLs-woc4sQ>eTCJ8HwO((CK5eCy3P z7M^u?s{;&-Ple|x9HaHIjbM(K*%QRF39Xqw$8^9Tk$Qfg=MB@6t1d$`m3L;iiFiqZ z^S7O%=b#JO3cTMPu##>Kt6f<=kKh17h187qzx|tuE3V^mQ1NX3ZX+|cGfL6G(I4jIC^56gIRF><$GFyIB{6R5x>i04h)-i z(It+N{4})|`-9}})A7_ut?^`Xl%AtKb7&>|>%@-PZ?n~O2ZmjRk{s{L%-Y@F?2GThH~wQQB5KL#8;*L5xuL!bQJ-zsn{!DLZ>$|s5@PCFrC%WvMhXG?%`X96B%J4DC)5)(=QAo5Ln zCCM5MQo%%r|Do;krC&}RuA!MJ$5%h@Wdo-W{u%Oyt5gM7ON~;^nxG1;{ix?@yu|xB zofF6tKykeoay5goObrJ3)IadYQAS=$K*A}KNOrp}m>Dr(_Oi0P^$1@v&3&S5T%wQ2 zz2ZnVIKkv|pWXd9$h?PAs!2s1(u=7=i9dcfFRb%QpWh|%It4?`0wZ2QK+XZaGL@OW z#9?zIwp96hi50rne`7vrG2ppBec0Bks2-su=>FwD9DArKljTNLOcgtr{@fxaZ+AYU z>hx+%6`^I1E)EWW3AAV*Fvh`UGH%NXBZYth?04CXcFF!A2LOt-Vu&rE)eqBCiIPUv zvv4=P+I`&PF0^IAJM>YigIzo z-?GDRE=goWoZw-qGr1TBrlnlEZhqkt1}aa!zIE*L;>Y|9{?2XI^4>@9_-E~!_=KRm zAfZO*k93m7!tp?F`H~*{h8m}YIMV8r_ zN*ZF>@?QMP=BOG%dR>7(yW?duAPRbxGqpF&LJy2B26d&)^0SMe}#viww41h{s)IMs9Jw zNNj5n0iTPS7mi!yHoIJJ@XeP`71@$Mw{LnQD3L=Si(x0iRxy>oO^y-yz>u@I4Evz% zaYg6wc~n2*%oypEIqr4iA~HnVqOtY0dHM>freuNhklits&P97avQx-ZuPDbw9G%xk zdyf__kk%Xnn9qTE+Niplvs6ObD@BK@@U&g8t4fR=mI?%eFK=SrO>2^Ds9QdiMr&Fc zzskw{`jhaeFG;1j5Se`^_o!1YeqaZiVKDtA zPTIu5x^d%;tM3L!G)+;G{ZU#yM$LG|IUOe~h};qKy04i12B~XV3tCK5^3Y1YQC9cw zb!N@B=L&~)%x>?4$0fDxkwIrDe*;j6Ac@5CVU5& zib~Zi#T!V&ja1Wco6GfwHib=gpIZk#O{0((H$zl8~stW z^x1phzi=L;vM*{1DTnat#F8c|!9=jleXk|eblk|s>(vnyy7-C}lORHYeVW0W>dQNn z{Da!(`R$LgpK)bfb&t=V;qzw{|BrYY|K~S>LJ@Ob)X|;yH(AVF9CV*@O_s^F&TEYb zvlgNHI~c~wp3Eh&{MHt6-4u_fj=5cV47t+ZQAR(I*w>1dZO1y!T+Bg(rgw^M>ONR&>{`KVNe7S=s-+-EP7~WRJ zu20^SL?=l3VmhXNRQMXxt&$0#x? 
zE_-1;tso6oh@aD=Pc{dV*z`!R1bb#OM8=-`SC!3yuEj@pC#N>(BV^YazTF73OO?V}JO(RZ@5DhwCl^@pbUW0o4^B7w(P z-fl8Dwzpp?3tVa=)K~w^eSu0KtsH^u11iiL@Vifp5AsjvZY>}$=*t^F5z8T$Sl4r((P$UgRGrcv{|D&!irIe#LGVfUJ<;Y<&Lnf;Tq~MjW=m(!VMMy zF=24w|CV|iMGF4Sz0zp$(FduFuWm%6_&#TT)4PHxBUg3=4z#_tw%BoW9dTl|7=+jL zZxBgRc-*5cfeTJZk187Xx^BPPY-~7teQL+E0 zWRn$(!MaU$U@trs!-^O;t>7|nl>YR7q`85Ly)@vE+v50bt~8~Bl&2Y=8%w8BE3mN( znjWr_&zXx(TZqx<*o}mU1_S0`e7?=-rxsDNv5V{%X_jJR)LiU0gBKpagf<+CV#6=tm11#jL;AiawWI0ve!aF%-bJTHd7m&@mA&Y9Qv zB`~i1V%PO1b{*R4pPjb7_Y zMmTqi5ap$Yx8#WXyz9P2)d#o`tPk)_bw0jI`Rvt-Nj*hmv&60M^wJ0O?gs;NM9YnV8L=v>f?q`X>0D|q7?FZt&@t#+&QUC~F>-F@L z8oq$QIiVw3Y(8E#eIu?fxxbNN!|5cc?E)v`G=rg$z?PL&!5E*DiTcll#1uMt{Uxdn z4^goy{^pc_{K;r@Hk8y4+*S2s+c`2dC^u1n;sX0rLWs0RBGy~SeAm8ahgwLfjG>7= z-|7Q}{QgYcAo~v|0b*tE=fc5Ts5=bUo*NP?WenZ-Q-V*57O=y>Vk0oY@Y|%k<;*8d zWta7e%JF^eJ9lb+fAY#$0gcp;ksRqU#*Ljl{w9i^zY7UV)<}3W>P>apu9>r*lC5?2 z?{8`UXWqB2FI;8Jx@_;=vs+E%HT7yD5{#FLZ(p7428_h1BC%b|T`=*m-0g{XBi0e65M?ojHCVyyfQYnSUjz#+^6zPW4xoLmfZ9 z94iVsmz}^T|9$_*G#(&v&&j8s)XvVzf%_=zc*XxFstV(4#=eHwGOLSMQ zsohgcMXS diff --git a/Godeps/_workspace/src/github.com/op/go-logging/format.go b/Godeps/_workspace/src/github.com/op/go-logging/format.go deleted file mode 100644 index 99b1ddb72..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/format.go +++ /dev/null @@ -1,368 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "path" - "path/filepath" - "regexp" - "runtime" - "strings" - "sync" - "time" -) - -// TODO see Formatter interface in fmt/print.go -// TODO try text/template, maybe it have enough performance -// TODO other template systems? -// TODO make it possible to specify formats per backend? -type fmtVerb int - -const ( - fmtVerbTime fmtVerb = iota - fmtVerbLevel - fmtVerbId - fmtVerbPid - fmtVerbProgram - fmtVerbModule - fmtVerbMessage - fmtVerbLongfile - fmtVerbShortfile - fmtVerbLongpkg - fmtVerbShortpkg - fmtVerbLongfunc - fmtVerbShortfunc - fmtVerbLevelColor - - // Keep last, there are no match for these below. - fmtVerbUnknown - fmtVerbStatic -) - -var fmtVerbs = []string{ - "time", - "level", - "id", - "pid", - "program", - "module", - "message", - "longfile", - "shortfile", - "longpkg", - "shortpkg", - "longfunc", - "shortfunc", - "color", -} - -const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00" - -var defaultVerbsLayout = []string{ - rfc3339Milli, - "s", - "d", - "d", - "s", - "s", - "s", - "s", - "s", - "s", - "s", - "s", - "s", - "", -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) -) - -func getFmtVerbByName(name string) fmtVerb { - for i, verb := range fmtVerbs { - if name == verb { - return fmtVerb(i) - } - } - return fmtVerbUnknown -} - -// Formatter is the required interface for a custom log record formatter. -type Formatter interface { - Format(calldepth int, r *Record, w io.Writer) error -} - -// formatter is used by all backends unless otherwise overriden. -var formatter struct { - sync.RWMutex - def Formatter -} - -func getFormatter() Formatter { - formatter.RLock() - defer formatter.RUnlock() - return formatter.def -} - -var ( - // DefaultFormatter is the default formatter used and is only the message. 
- DefaultFormatter Formatter = MustStringFormatter("%{message}") - - // Glog format - GlogFormatter Formatter = MustStringFormatter("%{level:.1s}%{time:0102 15:04:05.999999} %{pid} %{shortfile}] %{message}") -) - -// SetFormatter sets the default formatter for all new backends. A backend will -// fetch this value once it is needed to format a record. Note that backends -// will cache the formatter after the first point. For now, make sure to set -// the formatter before logging. -func SetFormatter(f Formatter) { - formatter.Lock() - defer formatter.Unlock() - formatter.def = f -} - -var formatRe *regexp.Regexp = regexp.MustCompile(`%{([a-z]+)(?::(.*?[^\\]))?}`) - -type part struct { - verb fmtVerb - layout string -} - -// stringFormatter contains a list of parts which explains how to build the -// formatted string passed on to the logging backend. -type stringFormatter struct { - parts []part -} - -// NewStringFormatter returns a new Formatter which outputs the log record as a -// string based on the 'verbs' specified in the format string. -// -// The verbs: -// -// General: -// %{id} Sequence number for log message (uint64). -// %{pid} Process id (int) -// %{time} Time when log occurred (time.Time) -// %{level} Log level (Level) -// %{module} Module (string) -// %{program} Basename of os.Args[0] (string) -// %{message} Message (string) -// %{longfile} Full file name and line number: /a/b/c/d.go:23 -// %{shortfile} Final file name element and line number: d.go:23 -// %{color} ANSI color based on log level -// -// For normal types, the output can be customized by using the 'verbs' defined -// in the fmt package, eg. '%{id:04d}' to make the id output be '%04d' as the -// format string. -// -// For time.Time, use the same layout as time.Format to change the time format -// when output, eg "2006-01-02T15:04:05.999Z-07:00". -// -// For the 'color' verb, the output can be adjusted to either use bold colors, -// i.e., '%{color:bold}' or to reset the ANSI attributes, i.e., -// '%{color:reset}' Note that if you use the color verb explicitly, be sure to -// reset it or else the color state will persist past your log message. e.g., -// "%{color:bold}%{time:15:04:05} %{level:-8s}%{color:reset} %{message}" will -// just colorize the time and level, leaving the message uncolored. -// -// There's also a couple of experimental 'verbs'. These are exposed to get -// feedback and needs a bit of tinkering. Hence, they might change in the -// future. -// -// Experimental: -// %{longpkg} Full package path, eg. github.com/go-logging -// %{shortpkg} Base package path, eg. go-logging -// %{longfunc} Full function name, eg. littleEndian.PutUint32 -// %{shortfunc} Base function name, eg. PutUint32 -func NewStringFormatter(format string) (*stringFormatter, error) { - var fmter = &stringFormatter{} - - // Find the boundaries of all %{vars} - matches := formatRe.FindAllStringSubmatchIndex(format, -1) - if matches == nil { - return nil, errors.New("logger: invalid log format: " + format) - } - - // Collect all variables and static text for the format - prev := 0 - for _, m := range matches { - start, end := m[0], m[1] - if start > prev { - fmter.add(fmtVerbStatic, format[prev:start]) - } - - name := format[m[2]:m[3]] - verb := getFmtVerbByName(name) - if verb == fmtVerbUnknown { - return nil, errors.New("logger: unknown variable: " + name) - } - - // Handle layout customizations or use the default. If this is not for the - // time or color formatting, we need to prefix with %. 
- layout := defaultVerbsLayout[verb] - if m[4] != -1 { - layout = format[m[4]:m[5]] - } - if verb != fmtVerbTime && verb != fmtVerbLevelColor { - layout = "%" + layout - } - - fmter.add(verb, layout) - prev = end - } - end := format[prev:] - if end != "" { - fmter.add(fmtVerbStatic, end) - } - - // Make a test run to make sure we can format it correctly. - t, err := time.Parse(time.RFC3339, "2010-02-04T21:00:57-08:00") - if err != nil { - panic(err) - } - r := &Record{ - Id: 12345, - Time: t, - Module: "logger", - fmt: "hello %s", - args: []interface{}{"go"}, - } - if err := fmter.Format(0, r, &bytes.Buffer{}); err != nil { - return nil, err - } - - return fmter, nil -} - -// MustStringFormatter is equivalent to NewStringFormatter with a call to panic -// on error. -func MustStringFormatter(format string) *stringFormatter { - f, err := NewStringFormatter(format) - if err != nil { - panic("Failed to initialized string formatter: " + err.Error()) - } - return f -} - -func (f *stringFormatter) add(verb fmtVerb, layout string) { - f.parts = append(f.parts, part{verb, layout}) -} - -func (f *stringFormatter) Format(calldepth int, r *Record, output io.Writer) error { - for _, part := range f.parts { - if part.verb == fmtVerbStatic { - output.Write([]byte(part.layout)) - } else if part.verb == fmtVerbTime { - output.Write([]byte(r.Time.Format(part.layout))) - } else if part.verb == fmtVerbLevelColor { - if part.layout == "bold" { - output.Write([]byte(boldcolors[r.Level])) - } else if part.layout == "reset" { - output.Write([]byte("\033[0m")) - } else { - output.Write([]byte(colors[r.Level])) - } - } else { - var v interface{} - switch part.verb { - case fmtVerbLevel: - v = r.Level - break - case fmtVerbId: - v = r.Id - break - case fmtVerbPid: - v = pid - break - case fmtVerbProgram: - v = program - break - case fmtVerbModule: - v = r.Module - break - case fmtVerbMessage: - v = r.Message() - break - case fmtVerbLongfile, fmtVerbShortfile: - _, file, line, ok := runtime.Caller(calldepth + 1) - if !ok { - file = "???" - line = 0 - } else if part.verb == fmtVerbShortfile { - file = filepath.Base(file) - } - v = fmt.Sprintf("%s:%d", file, line) - case fmtVerbLongfunc, fmtVerbShortfunc, - fmtVerbLongpkg, fmtVerbShortpkg: - // TODO cache pc - v = "???" - if pc, _, _, ok := runtime.Caller(calldepth + 1); ok { - if f := runtime.FuncForPC(pc); f != nil { - v = formatFuncName(part.verb, f.Name()) - } - } - default: - panic("unhandled format part") - } - fmt.Fprintf(output, part.layout, v) - } - } - return nil -} - -// formatFuncName tries to extract certain part of the runtime formatted -// function name to some pre-defined variation. -// -// This function is known to not work properly if the package path or name -// contains a dot. -func formatFuncName(v fmtVerb, f string) string { - i := strings.LastIndex(f, "/") - j := strings.Index(f[i+1:], ".") - if j < 1 { - return "???" - } - pkg, fun := f[:i+j+1], f[i+j+2:] - switch v { - case fmtVerbLongpkg: - return pkg - case fmtVerbShortpkg: - return path.Base(pkg) - case fmtVerbLongfunc: - return fun - case fmtVerbShortfunc: - i = strings.LastIndex(fun, ".") - return fun[i+1:] - } - panic("unexpected func formatter") -} - -// backendFormatter combines a backend with a specific formatter making it -// possible to have different log formats for different backends. 
-type backendFormatter struct { - b Backend - f Formatter -} - -// NewBackendFormatter creates a new backend which makes all records that -// passes through it beeing formatted by the specific formatter. -func NewBackendFormatter(b Backend, f Formatter) *backendFormatter { - return &backendFormatter{b, f} -} - -// Log implements the Log function required by the Backend interface. -func (bf *backendFormatter) Log(level Level, calldepth int, r *Record) error { - // Make a shallow copy of the record and replace any formatter - r2 := *r - r2.formatter = bf.f - return bf.b.Log(level, calldepth+1, &r2) -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/format_test.go b/Godeps/_workspace/src/github.com/op/go-logging/format_test.go deleted file mode 100644 index c008e9e8b..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/format_test.go +++ /dev/null @@ -1,184 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "bytes" - "testing" -) - -func TestFormat(t *testing.T) { - backend := InitForTesting(DEBUG) - - f, err := NewStringFormatter("%{shortfile} %{time:2006-01-02T15:04:05} %{level:.1s} %{id:04d} %{module} %{message}") - if err != nil { - t.Fatalf("failed to set format: %s", err) - } - SetFormatter(f) - - log := MustGetLogger("module") - log.Debug("hello") - - line := MemoryRecordN(backend, 0).Formatted(0) - if "format_test.go:24 1970-01-01T00:00:00 D 0001 module hello" != line { - t.Errorf("Unexpected format: %s", line) - } -} - -func logAndGetLine(backend *MemoryBackend) string { - MustGetLogger("foo").Debug("hello") - return MemoryRecordN(backend, 0).Formatted(1) -} - -func getLastLine(backend *MemoryBackend) string { - return MemoryRecordN(backend, 0).Formatted(1) -} - -func realFunc(backend *MemoryBackend) string { - return logAndGetLine(backend) -} - -type structFunc struct{} - -func (structFunc) Log(backend *MemoryBackend) string { - return logAndGetLine(backend) -} - -func TestRealFuncFormat(t *testing.T) { - backend := InitForTesting(DEBUG) - SetFormatter(MustStringFormatter("%{shortfunc}")) - - line := realFunc(backend) - if "realFunc" != line { - t.Errorf("Unexpected format: %s", line) - } -} - -func TestStructFuncFormat(t *testing.T) { - backend := InitForTesting(DEBUG) - SetFormatter(MustStringFormatter("%{longfunc}")) - - var x structFunc - line := x.Log(backend) - if "structFunc.Log" != line { - t.Errorf("Unexpected format: %s", line) - } -} - -func TestVarFuncFormat(t *testing.T) { - backend := InitForTesting(DEBUG) - SetFormatter(MustStringFormatter("%{shortfunc}")) - - var varFunc = func() string { - return logAndGetLine(backend) - } - - line := varFunc() - if "???" 
== line || "TestVarFuncFormat" == line || "varFunc" == line { - t.Errorf("Unexpected format: %s", line) - } -} - -func TestFormatFuncName(t *testing.T) { - var tests = []struct { - filename string - longpkg string - shortpkg string - longfunc string - shortfunc string - }{ - {"", - "???", - "???", - "???", - "???"}, - {"main", - "???", - "???", - "???", - "???"}, - {"main.", - "main", - "main", - "", - ""}, - {"main.main", - "main", - "main", - "main", - "main"}, - {"github.com/op/go-logging.func·001", - "github.com/op/go-logging", - "go-logging", - "func·001", - "func·001"}, - {"github.com/op/go-logging.stringFormatter.Format", - "github.com/op/go-logging", - "go-logging", - "stringFormatter.Format", - "Format"}, - } - - var v string - for _, test := range tests { - v = formatFuncName(fmtVerbLongpkg, test.filename) - if test.longpkg != v { - t.Errorf("%s != %s", test.longpkg, v) - } - v = formatFuncName(fmtVerbShortpkg, test.filename) - if test.shortpkg != v { - t.Errorf("%s != %s", test.shortpkg, v) - } - v = formatFuncName(fmtVerbLongfunc, test.filename) - if test.longfunc != v { - t.Errorf("%s != %s", test.longfunc, v) - } - v = formatFuncName(fmtVerbShortfunc, test.filename) - if test.shortfunc != v { - t.Errorf("%s != %s", test.shortfunc, v) - } - } -} - -func TestBackendFormatter(t *testing.T) { - InitForTesting(DEBUG) - - // Create two backends and wrap one of the with a backend formatter - b1 := NewMemoryBackend(1) - b2 := NewMemoryBackend(1) - - f := MustStringFormatter("%{level} %{message}") - bf := NewBackendFormatter(b2, f) - - SetBackend(b1, bf) - - log := MustGetLogger("module") - log.Info("foo") - if "foo" != getLastLine(b1) { - t.Errorf("Unexpected line: %s", getLastLine(b1)) - } - if "INFO foo" != getLastLine(b2) { - t.Errorf("Unexpected line: %s", getLastLine(b2)) - } -} - -func BenchmarkStringFormatter(b *testing.B) { - fmt := "%{time:2006-01-02T15:04:05} %{level:.1s} %{id:04d} %{module} %{message}" - f := MustStringFormatter(fmt) - - backend := InitForTesting(DEBUG) - buf := &bytes.Buffer{} - log := MustGetLogger("module") - log.Debug("") - record := MemoryRecordN(backend, 0) - - b.ResetTimer() - for i := 0; i < b.N; i++ { - if err := f.Format(1, record, buf); err != nil { - b.Fatal(err) - buf.Truncate(0) - } - } -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/level.go b/Godeps/_workspace/src/github.com/op/go-logging/level.go deleted file mode 100644 index 1ef03917e..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/level.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "errors" - "strings" - "sync" -) - -var ErrInvalidLogLevel = errors.New("logger: invalid log level") - -// Level defines all available log levels for log messages. -type Level int - -const ( - CRITICAL Level = iota - ERROR - WARNING - NOTICE - INFO - DEBUG -) - -var levelNames = []string{ - "CRITICAL", - "ERROR", - "WARNING", - "NOTICE", - "INFO", - "DEBUG", -} - -// String returns the string representation of a logging level. -func (p Level) String() string { - return levelNames[p] -} - -// LogLevel returns the log level from a string representation. 
-func LogLevel(level string) (Level, error) { - for i, name := range levelNames { - if strings.EqualFold(name, level) { - return Level(i), nil - } - } - return ERROR, ErrInvalidLogLevel -} - -type Leveled interface { - GetLevel(string) Level - SetLevel(Level, string) - IsEnabledFor(Level, string) bool -} - -// LeveledBackend is a log backend with additional knobs for setting levels on -// individual modules to different levels. -type LeveledBackend interface { - Backend - Leveled -} - -type moduleLeveled struct { - levels map[string]Level - backend Backend - formatter Formatter - once sync.Once -} - -// AddModuleLevel wraps a log backend with knobs to have different log levels -// for different modules. -func AddModuleLevel(backend Backend) LeveledBackend { - var leveled LeveledBackend - var ok bool - if leveled, ok = backend.(LeveledBackend); !ok { - leveled = &moduleLeveled{ - levels: make(map[string]Level), - backend: backend, - } - } - return leveled -} - -// GetLevel returns the log level for the given module. -func (l *moduleLeveled) GetLevel(module string) Level { - level, exists := l.levels[module] - if exists == false { - level, exists = l.levels[""] - // no configuration exists, default to debug - if exists == false { - level = DEBUG - } - } - return level -} - -// SetLevel sets the log level for the given module. -func (l *moduleLeveled) SetLevel(level Level, module string) { - l.levels[module] = level -} - -// IsEnabledFor will return true if logging is enabled for the given module. -func (l *moduleLeveled) IsEnabledFor(level Level, module string) bool { - return level <= l.GetLevel(module) -} - -func (l *moduleLeveled) Log(level Level, calldepth int, rec *Record) (err error) { - if l.IsEnabledFor(level, rec.Module) { - // TODO get rid of traces of formatter here. BackendFormatter should be used. - rec.formatter = l.getFormatterAndCacheCurrent() - err = l.backend.Log(level, calldepth+1, rec) - } - return -} - -func (l *moduleLeveled) getFormatterAndCacheCurrent() Formatter { - l.once.Do(func() { - if l.formatter == nil { - l.formatter = getFormatter() - } - }) - return l.formatter -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/level_test.go b/Godeps/_workspace/src/github.com/op/go-logging/level_test.go deleted file mode 100644 index c8f9a3733..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/level_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package logging - -import "testing" - -func TestLevelString(t *testing.T) { - // Make sure all levels can be converted from string -> constant -> string - for _, name := range levelNames { - level, err := LogLevel(name) - if err != nil { - t.Errorf("failed to get level: %v", err) - continue - } - - if level.String() != name { - t.Errorf("invalid level conversion: %v != %v", level, name) - } - } -} - -func TestLevelLogLevel(t *testing.T) { - tests := []struct { - expected Level - level string - }{ - {-1, "bla"}, - {INFO, "iNfO"}, - {ERROR, "error"}, - {WARNING, "warninG"}, - } - - for _, test := range tests { - level, err := LogLevel(test.level) - if err != nil { - if test.expected == -1 { - continue - } else { - t.Errorf("failed to convert %s: %s", test.level, err) - } - } - if test.expected != level { - t.Errorf("failed to convert %s to level: %s != %s", test.level, test.expected, level) - } - } -} - -func TestLevelModuleLevel(t *testing.T) { - backend := NewMemoryBackend(128) - - leveled := AddModuleLevel(backend) - leveled.SetLevel(NOTICE, "") - leveled.SetLevel(ERROR, "foo") - leveled.SetLevel(INFO, "foo.bar") - leveled.SetLevel(WARNING, "bar") - - expected := []struct { - level Level - module string - }{ - {NOTICE, ""}, - {NOTICE, "something"}, - {ERROR, "foo"}, - {INFO, "foo.bar"}, - {WARNING, "bar"}, - } - - for _, e := range expected { - actual := leveled.GetLevel(e.module) - if e.level != actual { - t.Errorf("unexpected level in %s: %s != %s", e.module, e.level, actual) - } - } -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/log.go b/Godeps/_workspace/src/github.com/op/go-logging/log.go deleted file mode 100644 index f009f8af5..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/log.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "bytes" - "fmt" - "io" - "log" -) - -// TODO initialize here -var colors []string -var boldcolors []string - -type color int - -const ( - colorBlack = (iota + 30) - colorRed - colorGreen - colorYellow - colorBlue - colorMagenta - colorCyan - colorWhite -) - -// LogBackend utilizes the standard log module. -type LogBackend struct { - Logger *log.Logger - Color bool -} - -// NewLogBackend creates a new LogBackend. -func NewLogBackend(out io.Writer, prefix string, flag int) *LogBackend { - return &LogBackend{Logger: log.New(out, prefix, flag)} -} - -func (b *LogBackend) Log(level Level, calldepth int, rec *Record) error { - if b.Color { - buf := &bytes.Buffer{} - buf.Write([]byte(colors[level])) - buf.Write([]byte(rec.Formatted(calldepth + 1))) - buf.Write([]byte("\033[0m")) - // For some reason, the Go logger arbitrarily decided "2" was the correct - // call depth... 
- return b.Logger.Output(calldepth+2, buf.String()) - } else { - return b.Logger.Output(calldepth+2, rec.Formatted(calldepth+1)) - } - panic("should not be reached") -} - -func colorSeq(color color) string { - return fmt.Sprintf("\033[%dm", int(color)) -} - -func colorSeqBold(color color) string { - return fmt.Sprintf("\033[%d;1m", int(color)) -} - -func init() { - colors = []string{ - CRITICAL: colorSeq(colorMagenta), - ERROR: colorSeq(colorRed), - WARNING: colorSeq(colorYellow), - NOTICE: colorSeq(colorGreen), - DEBUG: colorSeq(colorCyan), - } - boldcolors = []string{ - CRITICAL: colorSeqBold(colorMagenta), - ERROR: colorSeqBold(colorRed), - WARNING: colorSeqBold(colorYellow), - NOTICE: colorSeqBold(colorGreen), - DEBUG: colorSeqBold(colorCyan), - } -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/log_test.go b/Godeps/_workspace/src/github.com/op/go-logging/log_test.go deleted file mode 100644 index 0ddcdf374..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/log_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import ( - "bytes" - "io/ioutil" - "log" - "strings" - "testing" -) - -func TestLogCalldepth(t *testing.T) { - buf := &bytes.Buffer{} - SetBackend(NewLogBackend(buf, "", log.Lshortfile)) - SetFormatter(MustStringFormatter("%{shortfile} %{level} %{message}")) - - log := MustGetLogger("test") - log.Info("test filename") - - parts := strings.SplitN(buf.String(), " ", 2) - - // Verify that the correct filename is registered by the stdlib logger - if !strings.HasPrefix(parts[0], "log_test.go:") { - t.Errorf("incorrect filename: %s", parts[0]) - } - // Verify that the correct filename is registered by go-logging - if !strings.HasPrefix(parts[1], "log_test.go:") { - t.Errorf("incorrect filename: %s", parts[1]) - } -} - -func BenchmarkLogMemoryBackendIgnored(b *testing.B) { - backend := SetBackend(NewMemoryBackend(1024)) - backend.SetLevel(INFO, "") - RunLogBenchmark(b) -} - -func BenchmarkLogMemoryBackend(b *testing.B) { - backend := SetBackend(NewMemoryBackend(1024)) - backend.SetLevel(DEBUG, "") - RunLogBenchmark(b) -} - -func BenchmarkLogChannelMemoryBackend(b *testing.B) { - channelBackend := NewChannelMemoryBackend(1024) - backend := SetBackend(channelBackend) - backend.SetLevel(DEBUG, "") - RunLogBenchmark(b) - channelBackend.Flush() -} - -func BenchmarkLogLeveled(b *testing.B) { - backend := SetBackend(NewLogBackend(ioutil.Discard, "", 0)) - backend.SetLevel(INFO, "") - - RunLogBenchmark(b) -} - -func BenchmarkLogLogBackend(b *testing.B) { - backend := SetBackend(NewLogBackend(ioutil.Discard, "", 0)) - backend.SetLevel(DEBUG, "") - RunLogBenchmark(b) -} - -func BenchmarkLogLogBackendColor(b *testing.B) { - colorizer := NewLogBackend(ioutil.Discard, "", 0) - colorizer.Color = true - backend := SetBackend(colorizer) - backend.SetLevel(DEBUG, "") - RunLogBenchmark(b) -} - -func BenchmarkLogLogBackendStdFlags(b *testing.B) { - backend := SetBackend(NewLogBackend(ioutil.Discard, "", log.LstdFlags)) - backend.SetLevel(DEBUG, "") - RunLogBenchmark(b) -} - -func BenchmarkLogLogBackendLongFileFlag(b *testing.B) { - backend := SetBackend(NewLogBackend(ioutil.Discard, "", log.Llongfile)) - backend.SetLevel(DEBUG, "") - RunLogBenchmark(b) -} - -func RunLogBenchmark(b *testing.B) { - password := Password("foo") - log := MustGetLogger("test") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - 
log.Debug("log line for %d and this is rectified: %s", i, password) - } -} - -func BenchmarkLogFixed(b *testing.B) { - backend := SetBackend(NewLogBackend(ioutil.Discard, "", 0)) - backend.SetLevel(DEBUG, "") - - RunLogBenchmarkFixedString(b) -} - -func BenchmarkLogFixedIgnored(b *testing.B) { - backend := SetBackend(NewLogBackend(ioutil.Discard, "", 0)) - backend.SetLevel(INFO, "") - RunLogBenchmarkFixedString(b) -} - -func RunLogBenchmarkFixedString(b *testing.B) { - log := MustGetLogger("test") - - b.ResetTimer() - for i := 0; i < b.N; i++ { - log.Debug("some random fixed text") - } -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/logger.go b/Godeps/_workspace/src/github.com/op/go-logging/logger.go deleted file mode 100644 index ca1f9a4e4..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/logger.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package logging implements a logging infrastructure for Go. It supports -// different logging backends like syslog, file and memory. Multiple backends -// can be utilized with different log levels per backend and logger. -package logging - -import ( - "bytes" - "fmt" - "log" - "os" - "strings" - "sync/atomic" - "time" -) - -// Redactor is an interface for types that may contain sensitive information -// (like passwords), which shouldn't be printed to the log. The idea was found -// in relog as part of the vitness project. -type Redactor interface { - Redacted() interface{} -} - -// Redact returns a string of * having the same length as s. -func Redact(s string) string { - return strings.Repeat("*", len(s)) -} - -var ( - // Sequence number is incremented and utilized for all log records created. - sequenceNo uint64 - - // timeNow is a customizable for testing purposes. - timeNow = time.Now -) - -// Record represents a log record and contains the timestamp when the record -// was created, an increasing id, filename and line and finally the actual -// formatted log line. -type Record struct { - Id uint64 - Time time.Time - Module string - Level Level - - // message is kept as a pointer to have shallow copies update this once - // needed. - message *string - args []interface{} - fmt string - formatter Formatter - formatted string -} - -func (r *Record) Formatted(calldepth int) string { - if r.formatted == "" { - var buf bytes.Buffer - r.formatter.Format(calldepth+1, r, &buf) - r.formatted = buf.String() - } - return r.formatted -} - -func (r *Record) Message() string { - if r.message == nil { - // Redact the arguments that implements the Redactor interface - for i, arg := range r.args { - if redactor, ok := arg.(Redactor); ok == true { - r.args[i] = redactor.Redacted() - } - } - msg := fmt.Sprintf(r.fmt, r.args...) - r.message = &msg - } - return *r.message -} - -type Logger struct { - Module string - backend LeveledBackend - haveBackend bool - - // ExtraCallDepth can be used to add additional call depth when getting the - // calling function. This is normally used when wrapping a logger. - ExtraCalldepth int -} - -func (l *Logger) SetBackend(backend LeveledBackend) { - l.backend = backend - l.haveBackend = true -} - -// TODO call NewLogger and remove MustGetLogger? -// GetLogger creates and returns a Logger object based on the module name. 
-func GetLogger(module string) (*Logger, error) { - return &Logger{Module: module}, nil -} - -// MustGetLogger is like GetLogger but panics if the logger can't be created. -// It simplifies safe initialization of a global logger for eg. a package. -func MustGetLogger(module string) *Logger { - logger, err := GetLogger(module) - if err != nil { - panic("logger: " + module + ": " + err.Error()) - } - return logger -} - -// Reset restores the internal state of the logging library. -func Reset() { - // TODO make a global Init() method to be less magic? or make it such that - // if there's no backends at all configured, we could use some tricks to - // automatically setup backends based if we have a TTY or not. - sequenceNo = 0 - b := SetBackend(NewLogBackend(os.Stderr, "", log.LstdFlags)) - b.SetLevel(DEBUG, "") - SetFormatter(DefaultFormatter) - timeNow = time.Now -} - -// IsEnabledFor returns true if the logger is enabled for the given level. -func (l *Logger) IsEnabledFor(level Level) bool { - return defaultBackend.IsEnabledFor(level, l.Module) -} - -func (l *Logger) log(lvl Level, format string, args ...interface{}) { - if !l.IsEnabledFor(lvl) { - return - } - - // Create the logging record and pass it in to the backend - record := &Record{ - Id: atomic.AddUint64(&sequenceNo, 1), - Time: timeNow(), - Module: l.Module, - Level: lvl, - fmt: format, - args: args, - } - - // TODO use channels to fan out the records to all backends? - // TODO in case of errors, do something (tricky) - - // calldepth=2 brings the stack up to the caller of the level - // methods, Info(), Fatal(), etc. - // ExtraCallDepth allows this to be extended further up the stack in case we - // are wrapping these methods, eg. to expose them package level - if l.haveBackend { - l.backend.Log(lvl, 2+l.ExtraCalldepth, record) - return - } - - defaultBackend.Log(lvl, 2+l.ExtraCalldepth, record) -} - -// Fatal is equivalent to l.Critical(fmt.Sprint()) followed by a call to os.Exit(1). -func (l *Logger) Fatal(args ...interface{}) { - s := fmt.Sprint(args...) - l.log(CRITICAL, "%s", s) - os.Exit(1) -} - -// Fatalf is equivalent to l.Critical followed by a call to os.Exit(1). -func (l *Logger) Fatalf(format string, args ...interface{}) { - l.log(CRITICAL, format, args...) - os.Exit(1) -} - -// Panic is equivalent to l.Critical(fmt.Sprint()) followed by a call to panic(). -func (l *Logger) Panic(args ...interface{}) { - s := fmt.Sprint(args...) - l.log(CRITICAL, "%s", s) - panic(s) -} - -// Panicf is equivalent to l.Critical followed by a call to panic(). -func (l *Logger) Panicf(format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - l.log(CRITICAL, "%s", s) - panic(s) -} - -// Critical logs a message using CRITICAL as log level. -func (l *Logger) Critical(format string, args ...interface{}) { - l.log(CRITICAL, format, args...) -} - -// Error logs a message using ERROR as log level. -func (l *Logger) Error(format string, args ...interface{}) { - l.log(ERROR, format, args...) -} - -// Warning logs a message using WARNING as log level. -func (l *Logger) Warning(format string, args ...interface{}) { - l.log(WARNING, format, args...) -} - -// Notice logs a message using NOTICE as log level. -func (l *Logger) Notice(format string, args ...interface{}) { - l.log(NOTICE, format, args...) -} - -// Info logs a message using INFO as log level. -func (l *Logger) Info(format string, args ...interface{}) { - l.log(INFO, format, args...) -} - -// Debug logs a message using DEBUG as log level. 
-func (l *Logger) Debug(format string, args ...interface{}) { - l.log(DEBUG, format, args...) -} - -func init() { - Reset() -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/logger_test.go b/Godeps/_workspace/src/github.com/op/go-logging/logger_test.go deleted file mode 100644 index acf249835..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/logger_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import "testing" - -type Password string - -func (p Password) Redacted() interface{} { - return Redact(string(p)) -} - -func TestSequenceNoOverflow(t *testing.T) { - // Forcefully set the next sequence number to the maximum - backend := InitForTesting(DEBUG) - sequenceNo = ^uint64(0) - - log := MustGetLogger("test") - log.Debug("test") - - if MemoryRecordN(backend, 0).Id != 0 { - t.Errorf("Unexpected sequence no: %v", MemoryRecordN(backend, 0).Id) - } -} - -func TestRedact(t *testing.T) { - backend := InitForTesting(DEBUG) - password := Password("123456") - log := MustGetLogger("test") - log.Debug("foo %s", password) - if "foo ******" != MemoryRecordN(backend, 0).Formatted(0) { - t.Errorf("redacted line: %v", MemoryRecordN(backend, 0)) - } -} - -func TestPrivateBackend(t *testing.T) { - stdBackend := InitForTesting(DEBUG) - log := MustGetLogger("test") - privateBackend := NewMemoryBackend(10240) - lvlBackend := AddModuleLevel(privateBackend) - lvlBackend.SetLevel(DEBUG, "") - log.SetBackend(lvlBackend) - log.Debug("to private backend") - if stdBackend.size > 0 { - t.Errorf("something in stdBackend, size of backend: %d", stdBackend.size) - } - if "to private baсkend" == MemoryRecordN(privateBackend, 0).Formatted(0) { - t.Errorf("logged to defaultBackend:", MemoryRecordN(privateBackend, 0)) - } - -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/memory.go b/Godeps/_workspace/src/github.com/op/go-logging/memory.go deleted file mode 100644 index d55868d26..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/memory.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -package logging - -import ( - "sync" - "sync/atomic" - "time" - "unsafe" -) - -// TODO pick one of the memory backends and stick with it or share interface. - -// InitForTesting is a convenient method when using logging in a test. Once -// called, the time will be frozen to January 1, 1970 UTC. -func InitForTesting(level Level) *MemoryBackend { - Reset() - - memoryBackend := NewMemoryBackend(10240) - - leveledBackend := AddModuleLevel(memoryBackend) - leveledBackend.SetLevel(level, "") - SetBackend(leveledBackend) - - timeNow = func() time.Time { - return time.Unix(0, 0).UTC() - } - return memoryBackend -} - -// Node is a record node pointing to an optional next node. -type node struct { - next *node - Record *Record -} - -// Next returns the next record node. If there's no node available, it will -// return nil. -func (n *node) Next() *node { - return n.next -} - -// MemoryBackend is a simple memory based logging backend that will not produce -// any output but merly keep records, up to the given size, in memory. 
-type MemoryBackend struct { - size int32 - maxSize int32 - head, tail unsafe.Pointer -} - -// NewMemoryBackend creates a simple in-memory logging backend. -func NewMemoryBackend(size int) *MemoryBackend { - return &MemoryBackend{maxSize: int32(size)} -} - -// Log implements the Log method required by Backend. -func (b *MemoryBackend) Log(level Level, calldepth int, rec *Record) error { - var size int32 - - n := &node{Record: rec} - np := unsafe.Pointer(n) - - // Add the record to the tail. If there's no records available, tail and - // head will both be nil. When we successfully set the tail and the previous - // value was nil, it's safe to set the head to the current value too. - for { - tailp := b.tail - swapped := atomic.CompareAndSwapPointer( - &b.tail, - tailp, - np, - ) - if swapped == true { - if tailp == nil { - b.head = np - } else { - (*node)(tailp).next = n - } - size = atomic.AddInt32(&b.size, 1) - break - } - } - - // Since one record was added, we might have overflowed the list. Remove - // a record if that is the case. The size will fluctate a bit, but - // eventual consistent. - if b.maxSize > 0 && size > b.maxSize { - for { - headp := b.head - head := (*node)(b.head) - if head.next == nil { - break - } - swapped := atomic.CompareAndSwapPointer( - &b.head, - headp, - unsafe.Pointer(head.next), - ) - if swapped == true { - atomic.AddInt32(&b.size, -1) - break - } - } - } - return nil -} - -// Head returns the oldest record node kept in memory. It can be used to -// iterate over records, one by one, up to the last record. -// -// Note: new records can get added while iterating. Hence the number of records -// iterated over might be larger than the maximum size. -func (b *MemoryBackend) Head() *node { - return (*node)(b.head) -} - -type event int - -const ( - eventFlush event = iota - eventStop -) - -// ChannelMemoryBackend is very similar to the MemoryBackend, except that it -// internally utilizes a channel. -type ChannelMemoryBackend struct { - maxSize int - size int - incoming chan *Record - events chan event - mu sync.Mutex - running bool - flushWg sync.WaitGroup - stopWg sync.WaitGroup - head, tail *node -} - -// NewChannelMemoryBackend creates a simple in-memory logging backend which -// utilizes a go channel for communication. -// -// Start will automatically be called by this function. -func NewChannelMemoryBackend(size int) *ChannelMemoryBackend { - backend := &ChannelMemoryBackend{ - maxSize: size, - incoming: make(chan *Record, 1024), - events: make(chan event), - } - backend.Start() - return backend -} - -// Start launches the internal goroutine which starts processing data from the -// input channel. -func (b *ChannelMemoryBackend) Start() { - b.mu.Lock() - defer b.mu.Unlock() - - // Launch the goroutine unless it's already running. 
- if b.running != true { - b.running = true - b.stopWg.Add(1) - go b.process() - } -} - -func (b *ChannelMemoryBackend) process() { - defer b.stopWg.Done() - for { - select { - case rec := <-b.incoming: - b.insertRecord(rec) - case e := <-b.events: - switch e { - case eventStop: - return - case eventFlush: - for len(b.incoming) > 0 { - b.insertRecord(<-b.incoming) - } - b.flushWg.Done() - } - } - } -} - -func (b *ChannelMemoryBackend) insertRecord(rec *Record) { - prev := b.tail - b.tail = &node{Record: rec} - if prev == nil { - b.head = b.tail - } else { - prev.next = b.tail - } - - if b.maxSize > 0 && b.size >= b.maxSize { - b.head = b.head.next - } else { - b.size += 1 - } -} - -// Flush waits until all records in the buffered channel have been processed. -func (b *ChannelMemoryBackend) Flush() { - b.flushWg.Add(1) - b.events <- eventFlush - b.flushWg.Wait() -} - -// Stop signals the internal goroutine to exit and waits until it have. -func (b *ChannelMemoryBackend) Stop() { - b.mu.Lock() - if b.running == true { - b.running = false - b.events <- eventStop - } - b.mu.Unlock() - b.stopWg.Wait() -} - -// Log implements the Log method required by Backend. -func (b *ChannelMemoryBackend) Log(level Level, calldepth int, rec *Record) error { - b.incoming <- rec - return nil -} - -// Head returns the oldest record node kept in memory. It can be used to -// iterate over records, one by one, up to the last record. -// -// Note: new records can get added while iterating. Hence the number of records -// iterated over might be larger than the maximum size. -func (b *ChannelMemoryBackend) Head() *node { - return b.head -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/memory_test.go b/Godeps/_workspace/src/github.com/op/go-logging/memory_test.go deleted file mode 100644 index fe5a82e44..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/memory_test.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package logging - -import ( - "strconv" - "testing" -) - -// TODO share more code between these tests -func MemoryRecordN(b *MemoryBackend, n int) *Record { - node := b.Head() - for i := 0; i < n; i++ { - if node == nil { - break - } - node = node.Next() - } - if node == nil { - return nil - } - return node.Record -} - -func ChannelMemoryRecordN(b *ChannelMemoryBackend, n int) *Record { - b.Flush() - node := b.Head() - for i := 0; i < n; i++ { - if node == nil { - break - } - node = node.Next() - } - if node == nil { - return nil - } - return node.Record -} - -func TestMemoryBackend(t *testing.T) { - backend := NewMemoryBackend(8) - SetBackend(backend) - - log := MustGetLogger("test") - - if nil != MemoryRecordN(backend, 0) || 0 != backend.size { - t.Errorf("memory level: %d", backend.size) - } - - // Run 13 times, the resulting vector should be [5..12] - for i := 0; i < 13; i++ { - log.Info("%d", i) - } - - if 8 != backend.size { - t.Errorf("record length: %d", backend.size) - } - record := MemoryRecordN(backend, 0) - if "5" != record.Formatted(0) { - t.Errorf("unexpected start: %s", record.Formatted(0)) - } - for i := 0; i < 8; i++ { - record = MemoryRecordN(backend, i) - if strconv.Itoa(i+5) != record.Formatted(0) { - t.Errorf("unexpected record: %v", record.Formatted(0)) - } - } - record = MemoryRecordN(backend, 7) - if "12" != record.Formatted(0) { - t.Errorf("unexpected end: %s", record.Formatted(0)) - } - record = MemoryRecordN(backend, 8) - if nil != record { - t.Errorf("unexpected eof: %s", record.Formatted(0)) - } -} - -func TestChannelMemoryBackend(t *testing.T) { - backend := NewChannelMemoryBackend(8) - SetBackend(backend) - - log := MustGetLogger("test") - - if nil != ChannelMemoryRecordN(backend, 0) || 0 != backend.size { - t.Errorf("memory level: %d", backend.size) - } - - // Run 13 times, the resulting vector should be [5..12] - for i := 0; i < 13; i++ { - log.Info("%d", i) - } - backend.Flush() - - if 8 != backend.size { - t.Errorf("record length: %d", backend.size) - } - record := ChannelMemoryRecordN(backend, 0) - if "5" != record.Formatted(0) { - t.Errorf("unexpected start: %s", record.Formatted(0)) - } - for i := 0; i < 8; i++ { - record = ChannelMemoryRecordN(backend, i) - if strconv.Itoa(i+5) != record.Formatted(0) { - t.Errorf("unexpected record: %v", record.Formatted(0)) - } - } - record = ChannelMemoryRecordN(backend, 7) - if "12" != record.Formatted(0) { - t.Errorf("unexpected end: %s", record.Formatted(0)) - } - record = ChannelMemoryRecordN(backend, 8) - if nil != record { - t.Errorf("unexpected eof: %s", record.Formatted(0)) - } -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/multi.go b/Godeps/_workspace/src/github.com/op/go-logging/multi.go deleted file mode 100644 index 3731653e6..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/multi.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -// TODO remove Level stuff from the multi logger. Do one thing. - -// multiLogger is a log multiplexer which can be used to utilize multiple log -// backends at once. -type multiLogger struct { - backends []LeveledBackend -} - -// MultiLogger creates a logger which contain multiple loggers. 
-func MultiLogger(backends ...Backend) LeveledBackend { - var leveledBackends []LeveledBackend - for _, backend := range backends { - leveledBackends = append(leveledBackends, AddModuleLevel(backend)) - } - return &multiLogger{leveledBackends} -} - -// Log passes the log record to all backends. -func (b *multiLogger) Log(level Level, calldepth int, rec *Record) (err error) { - for _, backend := range b.backends { - if backend.IsEnabledFor(level, rec.Module) { - // Shallow copy of the record for the formatted cache on Record and get the - // record formatter from the backend. - r2 := *rec - if e := backend.Log(level, calldepth+1, &r2); e != nil { - err = e - } - } - } - return -} - -// GetLevel returns the highest level enabled by all backends. -func (b *multiLogger) GetLevel(module string) Level { - var level Level - for _, backend := range b.backends { - if backendLevel := backend.GetLevel(module); backendLevel > level { - level = backendLevel - } - } - return level -} - -// SetLevel propagates the same level to all backends. -func (b *multiLogger) SetLevel(level Level, module string) { - for _, backend := range b.backends { - backend.SetLevel(level, module) - } -} - -// IsEnabledFor returns true if any of the backends are enabled for it. -func (b *multiLogger) IsEnabledFor(level Level, module string) bool { - for _, backend := range b.backends { - if backend.IsEnabledFor(level, module) { - return true - } - } - return false -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/multi_test.go b/Godeps/_workspace/src/github.com/op/go-logging/multi_test.go deleted file mode 100644 index e1da5e3d5..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/multi_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logging - -import "testing" - -func TestMultiLogger(t *testing.T) { - log1 := NewMemoryBackend(8) - log2 := NewMemoryBackend(8) - SetBackend(MultiLogger(log1, log2)) - - log := MustGetLogger("test") - log.Debug("log") - - if "log" != MemoryRecordN(log1, 0).Formatted(0) { - t.Errorf("log1: %v", MemoryRecordN(log1, 0).Formatted(0)) - } - if "log" != MemoryRecordN(log2, 0).Formatted(0) { - t.Errorf("log2: %v", MemoryRecordN(log2, 0).Formatted(0)) - } -} - -func TestMultiLoggerLevel(t *testing.T) { - log1 := NewMemoryBackend(8) - log2 := NewMemoryBackend(8) - - leveled1 := AddModuleLevel(log1) - leveled2 := AddModuleLevel(log2) - - multi := MultiLogger(leveled1, leveled2) - multi.SetLevel(ERROR, "test") - SetBackend(multi) - - log := MustGetLogger("test") - log.Notice("log") - - if nil != MemoryRecordN(log1, 0) || nil != MemoryRecordN(log2, 0) { - t.Errorf("unexpected log record") - } - - leveled1.SetLevel(DEBUG, "test") - log.Notice("log") - if "log" != MemoryRecordN(log1, 0).Formatted(0) { - t.Errorf("log1 not received") - } - if nil != MemoryRecordN(log2, 0) { - t.Errorf("log2 received") - } -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/syslog.go b/Godeps/_workspace/src/github.com/op/go-logging/syslog.go deleted file mode 100644 index 9a5c8f5d3..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/syslog.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//+build !windows,!plan9 - -package logging - -import "log/syslog" - -// SyslogBackend is a simple logger to syslog backend. It automatically maps -// the internal log levels to appropriate syslog log levels. -type SyslogBackend struct { - Writer *syslog.Writer -} - -// NewSyslogBackend connects to the syslog daemon using UNIX sockets with the -// given prefix. If prefix is not given, the prefix will be derived from the -// launched command. -func NewSyslogBackend(prefix string) (b *SyslogBackend, err error) { - var w *syslog.Writer - w, err = syslog.New(syslog.LOG_CRIT, prefix) - return &SyslogBackend{w}, err -} - -// NewSyslogBackendPriority is the same as NewSyslogBackend, but with custom -// syslog priority, like syslog.LOG_LOCAL3|syslog.LOG_DEBUG etc. -func NewSyslogBackendPriority(prefix string, priority syslog.Priority) (b *SyslogBackend, err error) { - var w *syslog.Writer - w, err = syslog.New(priority, prefix) - return &SyslogBackend{w}, err -} - -func (b *SyslogBackend) Log(level Level, calldepth int, rec *Record) error { - line := rec.Formatted(calldepth + 1) - switch level { - case CRITICAL: - return b.Writer.Crit(line) - case ERROR: - return b.Writer.Err(line) - case WARNING: - return b.Writer.Warning(line) - case NOTICE: - return b.Writer.Notice(line) - case INFO: - return b.Writer.Info(line) - case DEBUG: - return b.Writer.Debug(line) - default: - } - panic("unhandled log level") -} diff --git a/Godeps/_workspace/src/github.com/op/go-logging/syslog_fallback.go b/Godeps/_workspace/src/github.com/op/go-logging/syslog_fallback.go deleted file mode 100644 index 91bc18de6..000000000 --- a/Godeps/_workspace/src/github.com/op/go-logging/syslog_fallback.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2013, Örjan Persson. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//+build windows plan9 - -package logging - -import ( - "fmt" -) - -type Priority int - -type SyslogBackend struct { -} - -func NewSyslogBackend(prefix string) (b *SyslogBackend, err error) { - return nil, fmt.Errorf("Platform does not support syslog") -} - -func NewSyslogBackendPriority(prefix string, priority Priority) (b *SyslogBackend, err error) { - return nil, fmt.Errorf("Platform does not support syslog") -} - -func (b *SyslogBackend) Log(level Level, calldepth int, rec *Record) error { - return fmt.Errorf("Platform does not support syslog") -} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/.gitignore b/Godeps/_workspace/src/github.com/thoas/stats/.gitignore deleted file mode 100644 index cccae3225..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -gin-bin diff --git a/Godeps/_workspace/src/github.com/thoas/stats/.travis.yml b/Godeps/_workspace/src/github.com/thoas/stats/.travis.yml deleted file mode 100644 index 1535bca84..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/.travis.yml +++ /dev/null @@ -1,7 +0,0 @@ -language: go -before_install: - - go get github.com/stretchr/testify -go: - - 1.3 - - 1.4 -script: go test diff --git a/Godeps/_workspace/src/github.com/thoas/stats/LICENSE b/Godeps/_workspace/src/github.com/thoas/stats/LICENSE deleted file mode 100644 index 7408e8e29..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Florent Messa - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/Godeps/_workspace/src/github.com/thoas/stats/Makefile b/Godeps/_workspace/src/github.com/thoas/stats/Makefile deleted file mode 100644 index 899212de4..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -test: unit - -unit: - @(go test -v) diff --git a/Godeps/_workspace/src/github.com/thoas/stats/README.rst b/Godeps/_workspace/src/github.com/thoas/stats/README.rst deleted file mode 100644 index 32ec98420..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/README.rst +++ /dev/null @@ -1,251 +0,0 @@ -Go stats handler -================ - -.. 
image:: https://secure.travis-ci.org/thoas/stats.png?branch=master - :alt: Build Status - :target: http://travis-ci.org/thoas/stats - -stats is a ``net/http`` handler in golang reporting various metrics about -your web application. - -This middleware has been developed and required for the need of picfit_, -an image resizing server written in Go. - -Compatibility -------------- - -This handler supports the following frameworks at the moment: - -* `negroni`_ -* `martini`_ -* `gocraft/web `_ -* `Gin `_ -* `Goji `_ -* `Beego `_ -* `HTTPRouter `_ - -We don't support your favorite Go framework? Send me a PR or -create a new `issue `_ and -I will implement it :) - -Installation ------------- - -1. Make sure you have a Go language compiler >= 1.3 (required) and git installed. -2. Make sure you have the following go system dependencies in your $PATH: bzr, svn, hg, git -3. Ensure your GOPATH_ is properly set. -4. Download it: - -:: - - go get github.com/thoas/stats - - -Usage ------ - -Basic net/http -.............. - -To use this handler directly with ``net/http``, you need to call the -middleware with the handler itself: - -.. code-block:: go - - package main - - import ( - "net/http" - "github.com/thoas/stats" - ) - - func main() { - h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - handler := stats.New().Handler(h) - http.ListenAndServe(":8080", handler) - } - -Negroni -....... - -If you are using negroni_ you can implement the handler as -a simple middleware in ``server.go``: - -.. code-block:: go - - package main - - import ( - "net/http" - "github.com/codegangsta/negroni" - "github.com/thoas/stats" - "encoding/json" - ) - - func main() { - middleware := stats.New() - - mux := http.NewServeMux() - - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - mux.HandleFunc("/stats", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - stats := middleware.Data() - - b, _ := json.Marshal(stats) - - w.Write(b) - }) - - n := negroni.Classic() - n.Use(middleware) - n.UseHandler(mux) - n.Run(":3000") - } - -HTTPRouter -....... - -If you are using HTTPRouter_ you need to call the middleware with the handler itself: - -.. code-block:: go - - package main - - import ( - "encoding/json" - "github.com/julienschmidt/httprouter" - "github.com/thoas/stats" - "net/http" - ) - - func main() { - router := httprouter.New() - s := stats.New() - router.GET("/stats", func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - s, err := json.Marshal(s.Data()) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } - w.Write(s) - }) - http.ListenAndServe(":8080", s.Handler(router)) - } - - -Martini -....... - -If you are using martini_, you can implement the handler as a wrapper of -a ``Martini.Context`` in ``server.go``: - - -.. 
code-block:: go - - package main - - import ( - "encoding/json" - "github.com/go-martini/martini" - "github.com/thoas/stats" - "net/http" - ) - - func main() { - middleware := stats.New() - - m := martini.Classic() - m.Get("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - m.Get("/stats", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - stats := middleware.Data() - - b, _ := json.Marshal(stats) - - w.Write(b) - }) - - m.Use(func(c martini.Context, w http.ResponseWriter, r *http.Request) { - beginning, recorder := middleware.Begin(w) - - c.Next() - - middleware.End(beginning, recorder) - }) - m.Run() - } - -Run it in a shell: - -:: - - $ go run server.go - -Then in another shell run: - -:: - - $ curl http://localhost:3000/stats | python -m "json.tool" - -Expect the following result: - -.. code-block:: json - - { - "total_response_time": "1.907382ms", - "average_response_time": "86.699\u00b5s", - "average_response_time_sec": 8.6699e-05, - "count": 1, - "pid": 99894, - "status_code_count": { - "200": 1 - }, - "time": "2015-03-06 17:23:27.000677896 +0100 CET", - "total_count": 22, - "total_response_time_sec": 0.0019073820000000002, - "total_status_code_count": { - "200": 22 - }, - "unixtime": 1425659007, - "uptime": "4m14.502271612s", - "uptime_sec": 254.502271612 - } - -See `examples `_ to -test them. - - -Inspiration ------------ - -`Antoine Imbert `_ is the original author -of this middleware. - -Originally developed for `go-json-rest `_, -it had been ported as a simple Golang handler by `Florent Messa `_ -to be used in various frameworks. - -This middleware implements a ticker which is launched every seconds to -reset requests/sec and will implement new features in a near future :) - -.. _GOPATH: http://golang.org/doc/code.html#GOPATH -.. _StatusMiddleware: https://github.com/ant0ine/go-json-rest/blob/master/rest/status.go -.. _go-json-rest: https://github.com/ant0ine/go-json-rest -.. _negroni: https://github.com/codegangsta/negroni -.. _martini: https://github.com/go-martini/martini -.. _picfit: https://github.com/thoas/picfit -.. 
_HTTPRouter: https://github.com/julienschmidt/httprouter diff --git a/Godeps/_workspace/src/github.com/thoas/stats/examples/beego/server.go b/Godeps/_workspace/src/github.com/thoas/stats/examples/beego/server.go deleted file mode 100644 index 4310f7ef9..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/examples/beego/server.go +++ /dev/null @@ -1,34 +0,0 @@ -package main - -import ( - "github.com/astaxie/beego" - "github.com/astaxie/beego/context" - "github.com/thoas/stats" - "time" -) - -var Stats = stats.New() - -type StatsController struct { - beego.Controller -} - -func (this *StatsController) Get() { - this.Data["json"] = Stats.Data() - this.Ctx.Output.SetStatus(200) - this.ServeJson() -} - -func main() { - beego.InsertFilter("*", beego.BeforeRouter, func(ctx *context.Context) { - startTime := time.Now() - ctx.Input.SetData("stats_timer", startTime) - }) - beego.InsertFilter("*", beego.FinishRouter, func(ctx *context.Context) { - Stats.EndWithStatus(ctx.Input.GetData("stats_timer").(time.Time), ctx.Output.Status) - }) - - beego.Router("/stats", &StatsController{}) - - beego.Run() -} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/examples/gin-gonic/server.go b/Godeps/_workspace/src/github.com/thoas/stats/examples/gin-gonic/server.go deleted file mode 100644 index 9ab53fe66..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/examples/gin-gonic/server.go +++ /dev/null @@ -1,34 +0,0 @@ -package main - -import ( - "github.com/gin-gonic/gin" - "github.com/thoas/stats" - "net/http" - "time" -) - -var Stats = stats.New() - -func main() { - r := gin.New() - - r.Use(func() gin.HandlerFunc { - return func(c *gin.Context) { - beginning := time.Now() - - c.Next() - - Stats.End(beginning, c.Writer) - } - }()) - - r.GET("/stats", func(c *gin.Context) { - c.JSON(http.StatusOK, Stats.Data()) - }) - - r.GET("/", func(c *gin.Context) { - c.JSON(http.StatusOK, gin.H{"hello": "world"}) - }) - - r.Run(":3001") -} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/examples/gocraft/server.go b/Godeps/_workspace/src/github.com/thoas/stats/examples/gocraft/server.go deleted file mode 100644 index df9e3aca5..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/examples/gocraft/server.go +++ /dev/null @@ -1,46 +0,0 @@ -package main - -import ( - "encoding/json" - "github.com/gocraft/web" - "github.com/thoas/stats" - "net/http" -) - -var Stats = stats.New() - -type Context struct { -} - -func (c *Context) Root(rw web.ResponseWriter, req *web.Request) { - rw.Header().Set("Content-Type", "application/json") - rw.Write([]byte("{\"hello\": \"world\"}")) -} - -func (c *Context) RetrieveStats(rw web.ResponseWriter, req *web.Request) { - rw.Header().Set("Content-Type", "application/json") - - stats := Stats.Data() - - b, _ := json.Marshal(stats) - - rw.Write(b) -} - -func (c *Context) Recording(rw web.ResponseWriter, req *web.Request, next web.NextMiddlewareFunc) { - beginning, recorder := Stats.Begin(rw) - - next(recorder, req) - - Stats.End(beginning, recorder) -} - -func main() { - router := web.New(Context{}). // Create your router - Middleware(web.LoggerMiddleware). // Use some included middleware - Middleware(web.ShowErrorsMiddleware). // ... - Middleware((*Context).Recording). // Your own middleware! - Get("/", (*Context).Root). // Add a route - Get("/stats", (*Context).RetrieveStats) // Add a route - http.ListenAndServe("localhost:3001", router) // Start the server! 
-} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/examples/goji/server.go b/Godeps/_workspace/src/github.com/thoas/stats/examples/goji/server.go deleted file mode 100644 index ba044de41..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/examples/goji/server.go +++ /dev/null @@ -1,29 +0,0 @@ -package main - -import ( - "encoding/json" - "github.com/thoas/stats" - "github.com/zenazn/goji" - "net/http" -) - -func main() { - middleware := stats.New() - - goji.Get("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - goji.Get("/stats", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - stats := middleware.Data() - - b, _ := json.Marshal(stats) - - w.Write(b) - }) - - goji.Use(middleware.Handler) - goji.Serve() -} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/examples/httprouter/server.go b/Godeps/_workspace/src/github.com/thoas/stats/examples/httprouter/server.go deleted file mode 100644 index cbb6941a6..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/examples/httprouter/server.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "encoding/json" - "github.com/julienschmidt/httprouter" - "github.com/thoas/stats" - "net/http" -) - -func main() { - router := httprouter.New() - s := stats.New() - router.GET("/stats", func(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - s, err := json.Marshal(s.Data()) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } - w.Write(s) - }) - http.ListenAndServe(":8080", s.Handler(router)) -} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/examples/martini/server.go b/Godeps/_workspace/src/github.com/thoas/stats/examples/martini/server.go deleted file mode 100644 index 6e5f272ed..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/examples/martini/server.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "encoding/json" - "github.com/go-martini/martini" - "github.com/thoas/stats" - "net/http" -) - -func main() { - middleware := stats.New() - - m := martini.Classic() - m.Get("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - m.Get("/stats", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - stats := middleware.Data() - - b, _ := json.Marshal(stats) - - w.Write(b) - }) - - m.Use(func(c martini.Context, w http.ResponseWriter, r *http.Request) { - beginning, recorder := middleware.Begin(w) - - c.Next() - - middleware.End(beginning, recorder) - }) - m.Run() -} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/examples/negroni/server.go b/Godeps/_workspace/src/github.com/thoas/stats/examples/negroni/server.go deleted file mode 100644 index ca9f7b26c..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/examples/negroni/server.go +++ /dev/null @@ -1,34 +0,0 @@ -package main - -import ( - "encoding/json" - "github.com/codegangsta/negroni" - "github.com/thoas/stats" - "net/http" -) - -func main() { - middleware := stats.New() - - mux := http.NewServeMux() - - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - mux.HandleFunc("/stats", 
func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - - stats := middleware.Data() - - b, _ := json.Marshal(stats) - - w.Write(b) - }) - - n := negroni.Classic() - n.Use(middleware) - n.UseHandler(mux) - n.Run(":3000") -} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/examples/nethttp/server.go b/Godeps/_workspace/src/github.com/thoas/stats/examples/nethttp/server.go deleted file mode 100644 index f5f4b872a..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/examples/nethttp/server.go +++ /dev/null @@ -1,16 +0,0 @@ -package main - -import ( - "github.com/thoas/stats" - "net/http" -) - -func main() { - h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.Write([]byte("{\"hello\": \"world\"}")) - }) - - handler := stats.New().Handler(h) - http.ListenAndServe(":8080", handler) -} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/recorder.go b/Godeps/_workspace/src/github.com/thoas/stats/recorder.go deleted file mode 100644 index c77f3e561..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/recorder.go +++ /dev/null @@ -1,60 +0,0 @@ -package stats - -import ( - "bufio" - "fmt" - "net" - "net/http" -) - -type Recorder interface { - http.ResponseWriter - Status() int -} - -type RecorderResponseWriter struct { - http.ResponseWriter - status int - size int -} - -func (r *RecorderResponseWriter) WriteHeader(code int) { - r.ResponseWriter.WriteHeader(code) - r.status = code -} - -func (r *RecorderResponseWriter) Flush() { - flusher, ok := r.ResponseWriter.(http.Flusher) - if ok { - flusher.Flush() - } -} - -func (r *RecorderResponseWriter) Status() int { - return r.status -} - -// Proxy method to Status to add support for gocraft -func (r *RecorderResponseWriter) StatusCode() int { - return r.Status() -} - -func (r *RecorderResponseWriter) Size() int { - return r.size -} - -func (r *RecorderResponseWriter) Written() bool { - return r.StatusCode() != 0 -} - -func (r *RecorderResponseWriter) CloseNotify() <-chan bool { - return r.ResponseWriter.(http.CloseNotifier).CloseNotify() -} - -func (r *RecorderResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hijacker, ok := r.ResponseWriter.(http.Hijacker) - if !ok { - return nil, nil, fmt.Errorf("the ResponseWriter doesn't support the Hijacker interface") - } - return hijacker.Hijack() -} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/stats.go b/Godeps/_workspace/src/github.com/thoas/stats/stats.go deleted file mode 100644 index 658610a8a..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/stats.go +++ /dev/null @@ -1,155 +0,0 @@ -package stats - -import ( - "fmt" - "net/http" - "os" - "sync" - "time" -) - -type Stats struct { - mu sync.RWMutex - Uptime time.Time - Pid int - ResponseCounts map[string]int - TotalResponseCounts map[string]int - TotalResponseTime time.Time -} - -func New() *Stats { - stats := &Stats{ - Uptime: time.Now(), - Pid: os.Getpid(), - ResponseCounts: map[string]int{}, - TotalResponseCounts: map[string]int{}, - TotalResponseTime: time.Time{}, - } - - go func() { - for { - stats.ResetResponseCounts() - - time.Sleep(time.Second * 1) - } - }() - - return stats -} - -func (mw *Stats) ResetResponseCounts() { - mw.mu.Lock() - defer mw.mu.Unlock() - mw.ResponseCounts = map[string]int{} -} - -// MiddlewareFunc makes Stats implement the Middleware interface. 
-func (mw *Stats) Handler(h http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - beginning, recorder := mw.Begin(w) - - h.ServeHTTP(recorder, r) - - mw.End(beginning, recorder) - }) -} - -// Negroni compatible interface -func (mw *Stats) ServeHTTP(w http.ResponseWriter, r *http.Request, next http.HandlerFunc) { - beginning, recorder := mw.Begin(w) - - next(recorder, r) - - mw.End(beginning, recorder) -} - -func (mw *Stats) Begin(w http.ResponseWriter) (time.Time, Recorder) { - start := time.Now() - - writer := &RecorderResponseWriter{w, 200, 0} - - return start, writer -} - -func (mw *Stats) EndWithStatus(start time.Time, status int) { - end := time.Now() - - responseTime := end.Sub(start) - - mw.mu.Lock() - - defer mw.mu.Unlock() - - statusCode := fmt.Sprintf("%d", status) - - mw.ResponseCounts[statusCode]++ - mw.TotalResponseCounts[statusCode]++ - mw.TotalResponseTime = mw.TotalResponseTime.Add(responseTime) -} - -func (mw *Stats) End(start time.Time, recorder Recorder) { - mw.EndWithStatus(start, recorder.Status()) -} - -type data struct { - Pid int `json:"pid"` - UpTime string `json:"uptime"` - UpTimeSec float64 `json:"uptime_sec"` - Time string `json:"time"` - TimeUnix int64 `json:"unixtime"` - StatusCodeCount map[string]int `json:"status_code_count"` - TotalStatusCodeCount map[string]int `json:"total_status_code_count"` - Count int `json:"count"` - TotalCount int `json:"total_count"` - TotalResponseTime string `json:"total_response_time"` - TotalResponseTimeSec float64 `json:"total_response_time_sec"` - AverageResponseTime string `json:"average_response_time"` - AverageResponseTimeSec float64 `json:"average_response_time_sec"` -} - -func (mw *Stats) Data() *data { - - mw.mu.RLock() - - now := time.Now() - - uptime := now.Sub(mw.Uptime) - - count := 0 - for _, current := range mw.ResponseCounts { - count += current - } - - totalCount := 0 - for _, count := range mw.TotalResponseCounts { - totalCount += count - } - - totalResponseTime := mw.TotalResponseTime.Sub(time.Time{}) - - averageResponseTime := time.Duration(0) - if totalCount > 0 { - avgNs := int64(totalResponseTime) / int64(totalCount) - averageResponseTime = time.Duration(avgNs) - } - - r := &data{ - Pid: mw.Pid, - UpTime: uptime.String(), - UpTimeSec: uptime.Seconds(), - Time: now.String(), - TimeUnix: now.Unix(), - StatusCodeCount: mw.ResponseCounts, - TotalStatusCodeCount: mw.TotalResponseCounts, - Count: count, - TotalCount: totalCount, - TotalResponseTime: totalResponseTime.String(), - TotalResponseTimeSec: totalResponseTime.Seconds(), - AverageResponseTime: averageResponseTime.String(), - AverageResponseTimeSec: averageResponseTime.Seconds(), - } - - mw.mu.RUnlock() - - return r -} diff --git a/Godeps/_workspace/src/github.com/thoas/stats/stats_test.go b/Godeps/_workspace/src/github.com/thoas/stats/stats_test.go deleted file mode 100644 index bfd581267..000000000 --- a/Godeps/_workspace/src/github.com/thoas/stats/stats_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package stats - -import ( - "encoding/json" - "github.com/stretchr/testify/assert" - "net/http" - "net/http/httptest" - "testing" -) - -var testHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(200) - w.Write([]byte("bar")) -}) - -func TestSimple(t *testing.T) { - s := New() - - res := httptest.NewRecorder() - - req, _ := http.NewRequest("GET", "http://example.com/foo", nil) - - s.Handler(testHandler).ServeHTTP(res, req) - - assert.Equal(t, res.Code, 200) - assert.Equal(t, 
s.ResponseCounts, map[string]int{"200": 1}) -} - -func TestGetStats(t *testing.T) { - s := New() - - var stats = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - - stats := s.Data() - - b, _ := json.Marshal(stats) - - w.Write(b) - w.WriteHeader(200) - w.Header().Add("Content-Type", "application/json") - }) - - res := httptest.NewRecorder() - - req, _ := http.NewRequest("GET", "http://example.com/foo", nil) - - s.Handler(testHandler).ServeHTTP(res, req) - - res = httptest.NewRecorder() - - s.Handler(stats).ServeHTTP(res, req) - - assert.Equal(t, res.Header().Get("Content-Type"), "application/json") - - var data map[string]interface{} - - err := json.Unmarshal(res.Body.Bytes(), &data) - - assert.Nil(t, err) - - assert.Equal(t, data["total_count"].(float64), float64(1)) -} diff --git a/Godeps/_workspace/src/github.com/tylerb/graceful/.gitignore b/Godeps/_workspace/src/github.com/tylerb/graceful/.gitignore deleted file mode 100644 index 836562412..000000000 --- a/Godeps/_workspace/src/github.com/tylerb/graceful/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/Godeps/_workspace/src/github.com/tylerb/graceful/LICENSE b/Godeps/_workspace/src/github.com/tylerb/graceful/LICENSE deleted file mode 100644 index a4f2f281b..000000000 --- a/Godeps/_workspace/src/github.com/tylerb/graceful/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Tyler Bunnell - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/Godeps/_workspace/src/github.com/tylerb/graceful/README.md b/Godeps/_workspace/src/github.com/tylerb/graceful/README.md deleted file mode 100644 index 531249a73..000000000 --- a/Godeps/_workspace/src/github.com/tylerb/graceful/README.md +++ /dev/null @@ -1,137 +0,0 @@ -graceful [![GoDoc](https://godoc.org/github.com/tylerb/graceful?status.png)](http://godoc.org/github.com/tylerb/graceful) [![Build Status](https://drone.io/github.com/tylerb/graceful/status.png)](https://drone.io/github.com/tylerb/graceful/latest) [![Coverage Status](https://coveralls.io/repos/tylerb/graceful/badge.svg?branch=dronedebug)](https://coveralls.io/r/tylerb/graceful?branch=dronedebug) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/tylerb/graceful?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) -======== - -Graceful is a Go 1.3+ package enabling graceful shutdown of http.Handler servers. - -## Installation - -To install, simply execute: - -``` -go get gopkg.in/tylerb/graceful.v1 -``` - -I am using [gopkg.in](http://labix.org/gopkg.in) to control releases. - -## Usage - -Using Graceful is easy. Simply create your http.Handler and pass it to the `Run` function: - -```go -package main - -import ( - "gopkg.in/tylerb/graceful.v1" - "net/http" - "fmt" - "time" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - graceful.Run(":3001",10*time.Second,mux) -} -``` - -Another example, using [Negroni](https://github.com/codegangsta/negroni), functions in much the same manner: - -```go -package main - -import ( - "github.com/codegangsta/negroni" - "gopkg.in/tylerb/graceful.v1" - "net/http" - "fmt" - "time" -) - -func main() { - mux := http.NewServeMux() - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "Welcome to the home page!") - }) - - n := negroni.Classic() - n.UseHandler(mux) - //n.Run(":3000") - graceful.Run(":3001",10*time.Second,n) -} -``` - -In addition to Run there are the http.Server counterparts ListenAndServe, ListenAndServeTLS and Serve, which allow you to configure HTTPS, custom timeouts and error handling. -Graceful may also be used by instantiating its Server type directly, which embeds an http.Server: - -```go -mux := // ... - -srv := &graceful.Server{ - Timeout: 10 * time.Second, - - Server: &http.Server{ - Addr: ":1234", - Handler: mux, - }, -} - -srv.ListenAndServe() -``` - -This form allows you to set the ConnState callback, which works in the same way as in http.Server: - -```go -mux := // ... - -srv := &graceful.Server{ - Timeout: 10 * time.Second, - - ConnState: func(conn net.Conn, state http.ConnState) { - // conn has a new state - }, - - Server: &http.Server{ - Addr: ":1234", - Handler: mux, - }, -} - -srv.ListenAndServe() -``` - -## Behaviour - -When Graceful is sent a SIGINT or SIGTERM (possibly from ^C or a kill command), it: - -1. Disables keepalive connections. -2. Closes the listening socket, allowing another process to listen on that port immediately. -3. Starts a timer of `timeout` duration to give active requests a chance to finish. -4. When timeout expires, closes all active connections. -5. Closes the `stopChan`, waking up any blocking goroutines. -6. Returns from the function, allowing the server to terminate. - -## Notes - -If the `timeout` argument to `Run` is 0, the server never times out, allowing all active requests to complete. 
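For example, a minimal sketch that opts out of the timeout entirely (reusing the `mux` handler from the examples above):

```go
// A zero timeout tells graceful to wait for every in-flight request
// to finish before returning, rather than force-closing connections.
graceful.Run(":3001", 0, mux)
```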
- -If you wish to stop the server in some way other than an OS signal, you may call the `Stop()` function. -This function stops the server, gracefully, using the new timeout value you provide. The `StopChan()` function -returns a channel on which you can block while waiting for the server to stop. This channel will be closed when -the server is stopped, allowing your execution to proceed. Multiple goroutines can block on this channel at the -same time and all will be signalled when stopping is complete. - -## Contributing - -If you would like to contribute, please: - -1. Create a GitHub issue regarding the contribution. Features and bugs should be discussed beforehand. -2. Fork the repository. -3. Create a pull request with your solution. This pull request should reference and close the issues (Fix #2). - -All pull requests should: - -1. Pass [gometalinter -t .](https://github.com/alecthomas/gometalinter) with no warnings. -2. Be `go fmt` formatted. diff --git a/Godeps/_workspace/src/github.com/tylerb/graceful/graceful.go b/Godeps/_workspace/src/github.com/tylerb/graceful/graceful.go deleted file mode 100644 index 8fa2b44e2..000000000 --- a/Godeps/_workspace/src/github.com/tylerb/graceful/graceful.go +++ /dev/null @@ -1,334 +0,0 @@ -package graceful - -import ( - "crypto/tls" - "log" - "net" - "net/http" - "os" - "os/signal" - "sync" - "syscall" - "time" - - "golang.org/x/net/netutil" -) - -// Server wraps an http.Server with graceful connection handling. -// It may be used directly in the same way as http.Server, or may -// be constructed with the global functions in this package. -// -// Example: -// srv := &graceful.Server{ -// Timeout: 5 * time.Second, -// Server: &http.Server{Addr: ":1234", Handler: handler}, -// } -// srv.ListenAndServe() -type Server struct { - *http.Server - - // Timeout is the duration to allow outstanding requests to survive - // before forcefully terminating them. - Timeout time.Duration - - // Limit the number of outstanding requests - ListenLimit int - - // ConnState specifies an optional callback function that is - // called when a client connection changes state. This is a proxy - // to the underlying http.Server's ConnState, and the original - // must not be set directly. - ConnState func(net.Conn, http.ConnState) - - // ShutdownInitiated is an optional callback function that is called - // when shutdown is initiated. It can be used to notify the client - // side of long lived connections (e.g. websockets) to reconnect. - ShutdownInitiated func() - - // NoSignalHandling prevents graceful from automatically shutting down - // on SIGINT and SIGTERM. If set to true, you must shut down the server - // manually with Stop(). - NoSignalHandling bool - - // interrupt signals the listener to stop serving connections, - // and the server to shut down. - interrupt chan os.Signal - - // stopLock is used to protect against concurrent calls to Stop - stopLock sync.Mutex - - // stopChan is the channel on which callers may block while waiting for - // the server to stop. - stopChan chan struct{} - - // chanLock is used to protect access to the various channel constructors. - chanLock sync.RWMutex - - // connections holds all connections managed by graceful - connections map[net.Conn]struct{} -} - -// Run serves the http.Handler with graceful shutdown enabled. -// -// timeout is the duration to wait until killing active requests and stopping the server. -// If timeout is 0, the server never times out. It waits for all active requests to finish. 
-func Run(addr string, timeout time.Duration, n http.Handler) { - srv := &Server{ - Timeout: timeout, - Server: &http.Server{Addr: addr, Handler: n}, - } - - if err := srv.ListenAndServe(); err != nil { - if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") { - logger := log.New(os.Stdout, "[graceful] ", 0) - logger.Fatal(err) - } - } -} - -// ListenAndServe is equivalent to http.Server.ListenAndServe with graceful shutdown enabled. -// -// timeout is the duration to wait until killing active requests and stopping the server. -// If timeout is 0, the server never times out. It waits for all active requests to finish. -func ListenAndServe(server *http.Server, timeout time.Duration) error { - srv := &Server{Timeout: timeout, Server: server} - return srv.ListenAndServe() -} - -// ListenAndServe is equivalent to http.Server.ListenAndServe with graceful shutdown enabled. -func (srv *Server) ListenAndServe() error { - // Create the listener so we can control their lifetime - addr := srv.Addr - if addr == "" { - addr = ":http" - } - l, err := net.Listen("tcp", addr) - if err != nil { - return err - } - - if srv.ListenLimit != 0 { - l = netutil.LimitListener(l, srv.ListenLimit) - } - return srv.Serve(l) -} - -// ListenAndServeTLS is equivalent to http.Server.ListenAndServeTLS with graceful shutdown enabled. -// -// timeout is the duration to wait until killing active requests and stopping the server. -// If timeout is 0, the server never times out. It waits for all active requests to finish. -func ListenAndServeTLS(server *http.Server, certFile, keyFile string, timeout time.Duration) error { - srv := &Server{Timeout: timeout, Server: server} - return srv.ListenAndServeTLS(certFile, keyFile) -} - -// ListenTLS is a convenience method that creates an https listener using the -// provided cert and key files. Use this method if you need access to the -// listener object directly. When ready, pass it to the Serve method. -func (srv *Server) ListenTLS(certFile, keyFile string) (net.Listener, error) { - // Create the listener ourselves so we can control its lifetime - addr := srv.Addr - if addr == "" { - addr = ":https" - } - - config := &tls.Config{} - if srv.TLSConfig != nil { - *config = *srv.TLSConfig - } - if config.NextProtos == nil { - config.NextProtos = []string{"http/1.1"} - } - - var err error - config.Certificates = make([]tls.Certificate, 1) - config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, err - } - - conn, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - - tlsListener := tls.NewListener(conn, config) - return tlsListener, nil -} - -// ListenAndServeTLS is equivalent to http.Server.ListenAndServeTLS with graceful shutdown enabled. -func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { - l, err := srv.ListenTLS(certFile, keyFile) - if err != nil { - return err - } - return srv.Serve(l) -} - -// ListenAndServeTLSConfig can be used with an existing TLS config and is equivalent to -// http.Server.ListenAndServeTLS with graceful shutdown enabled, -func (srv *Server) ListenAndServeTLSConfig(config *tls.Config) error { - addr := srv.Addr - if addr == "" { - addr = ":https" - } - - conn, err := net.Listen("tcp", addr) - if err != nil { - return err - } - - tlsListener := tls.NewListener(conn, config) - return srv.Serve(tlsListener) -} - -// Serve is equivalent to http.Server.Serve with graceful shutdown enabled. 
-// -// timeout is the duration to wait until killing active requests and stopping the server. -// If timeout is 0, the server never times out. It waits for all active requests to finish. -func Serve(server *http.Server, l net.Listener, timeout time.Duration) error { - srv := &Server{Timeout: timeout, Server: server} - return srv.Serve(l) -} - -// Serve is equivalent to http.Server.Serve with graceful shutdown enabled. -func (srv *Server) Serve(listener net.Listener) error { - // Track connection state - add := make(chan net.Conn) - remove := make(chan net.Conn) - - srv.Server.ConnState = func(conn net.Conn, state http.ConnState) { - switch state { - case http.StateNew: - add <- conn - case http.StateClosed, http.StateHijacked: - remove <- conn - } - if srv.ConnState != nil { - srv.ConnState(conn, state) - } - } - - // Manage open connections - shutdown := make(chan chan struct{}) - kill := make(chan struct{}) - go srv.manageConnections(add, remove, shutdown, kill) - - interrupt := srv.interruptChan() - // Set up the interrupt handler - if !srv.NoSignalHandling { - signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) - } - go srv.handleInterrupt(interrupt, listener) - - // Serve with graceful listener. - // Execution blocks here until listener.Close() is called, above. - err := srv.Server.Serve(listener) - - srv.shutdown(shutdown, kill) - - return err -} - -// Stop instructs the type to halt operations and close -// the stop channel when it is finished. -// -// timeout is grace period for which to wait before shutting -// down the server. The timeout value passed here will override the -// timeout given when constructing the server, as this is an explicit -// command to stop the server. -func (srv *Server) Stop(timeout time.Duration) { - srv.stopLock.Lock() - srv.Timeout = timeout - interrupt := srv.interruptChan() - interrupt <- syscall.SIGINT - srv.stopLock.Unlock() -} - -// StopChan gets the stop channel which will block until -// stopping has completed, at which point it is closed. -// Callers should never close the stop channel. -func (srv *Server) StopChan() <-chan struct{} { - srv.chanLock.Lock() - if srv.stopChan == nil { - srv.stopChan = make(chan struct{}) - } - srv.chanLock.Unlock() - return srv.stopChan -} - -func (srv *Server) manageConnections(add, remove chan net.Conn, shutdown chan chan struct{}, kill chan struct{}) { - var done chan struct{} - srv.connections = map[net.Conn]struct{}{} - for { - select { - case conn := <-add: - srv.connections[conn] = struct{}{} - case conn := <-remove: - delete(srv.connections, conn) - if done != nil && len(srv.connections) == 0 { - done <- struct{}{} - return - } - case done = <-shutdown: - if len(srv.connections) == 0 { - done <- struct{}{} - return - } - case <-kill: - for k := range srv.connections { - _ = k.Close() // nothing to do here if it errors - } - return - } - } -} - -func (srv *Server) interruptChan() chan os.Signal { - srv.chanLock.Lock() - if srv.interrupt == nil { - srv.interrupt = make(chan os.Signal, 1) - } - srv.chanLock.Unlock() - - return srv.interrupt -} - -func (srv *Server) handleInterrupt(interrupt chan os.Signal, listener net.Listener) { - <-interrupt - - srv.SetKeepAlivesEnabled(false) - _ = listener.Close() // we are shutting down anyway. ignore error. 
- - if srv.ShutdownInitiated != nil { - srv.ShutdownInitiated() - } - - srv.stopLock.Lock() - signal.Stop(interrupt) - close(interrupt) - srv.interrupt = nil - srv.stopLock.Unlock() -} - -func (srv *Server) shutdown(shutdown chan chan struct{}, kill chan struct{}) { - // Request done notification - done := make(chan struct{}) - shutdown <- done - - if srv.Timeout > 0 { - select { - case <-done: - case <-time.After(srv.Timeout): - close(kill) - } - } else { - <-done - } - // Close the stopChan to wake up any blocked goroutines. - srv.chanLock.Lock() - if srv.stopChan != nil { - close(srv.stopChan) - } - srv.chanLock.Unlock() -} diff --git a/Godeps/_workspace/src/github.com/tylerb/graceful/graceful_test.go b/Godeps/_workspace/src/github.com/tylerb/graceful/graceful_test.go deleted file mode 100644 index 36396fe82..000000000 --- a/Godeps/_workspace/src/github.com/tylerb/graceful/graceful_test.go +++ /dev/null @@ -1,401 +0,0 @@ -package graceful - -import ( - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "reflect" - "sync" - "syscall" - "testing" - "time" -) - -var ( - killTime = 500 * time.Millisecond - timeoutTime = 1000 * time.Millisecond - waitTime = 100 * time.Millisecond -) - -func runQuery(t *testing.T, expected int, shouldErr bool, wg *sync.WaitGroup, once *sync.Once) { - wg.Add(1) - defer wg.Done() - client := http.Client{} - r, err := client.Get("http://localhost:9654") - if shouldErr && err == nil { - once.Do(func() { - t.Fatal("Expected an error but none was encountered.") - }) - } else if shouldErr && err != nil { - if checkErr(t, err, once) { - return - } - } - if r != nil && r.StatusCode != expected { - once.Do(func() { - t.Fatalf("Incorrect status code on response. Expected %d. Got %d", expected, r.StatusCode) - }) - } else if r == nil { - once.Do(func() { - t.Fatal("No response when a response was expected.") - }) - } -} - -func checkErr(t *testing.T, err error, once *sync.Once) bool { - if err.(*url.Error).Err == io.EOF { - return true - } - errno := err.(*url.Error).Err.(*net.OpError).Err.(syscall.Errno) - if errno == syscall.ECONNREFUSED { - return true - } else if err != nil { - once.Do(func() { - t.Fatal("Error on Get:", err) - }) - } - return false -} - -func createListener(sleep time.Duration) (*http.Server, net.Listener, error) { - mux := http.NewServeMux() - mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { - time.Sleep(sleep) - rw.WriteHeader(http.StatusOK) - }) - - server := &http.Server{Addr: ":9654", Handler: mux} - l, err := net.Listen("tcp", ":9654") - if err != nil { - fmt.Println(err) - } - return server, l, err -} - -func runServer(timeout, sleep time.Duration, c chan os.Signal) error { - server, l, err := createListener(sleep) - if err != nil { - return err - } - - srv := &Server{Timeout: timeout, Server: server, interrupt: c} - return srv.Serve(l) -} - -func launchTestQueries(t *testing.T, wg *sync.WaitGroup, c chan os.Signal) { - var once sync.Once - for i := 0; i < 8; i++ { - go runQuery(t, http.StatusOK, false, wg, &once) - } - - time.Sleep(waitTime) - c <- os.Interrupt - time.Sleep(waitTime) - - for i := 0; i < 8; i++ { - go runQuery(t, 0, true, wg, &once) - } - - wg.Done() -} - -func TestGracefulRun(t *testing.T) { - c := make(chan os.Signal, 1) - - var wg sync.WaitGroup - wg.Add(1) - - go func() { - runServer(killTime, killTime/2, c) - wg.Done() - }() - - wg.Add(1) - go launchTestQueries(t, &wg, c) - wg.Wait() -} - -func TestGracefulRunTimesOut(t *testing.T) { - c := make(chan os.Signal, 1) - - var wg sync.WaitGroup - 
wg.Add(1) - - go func() { - runServer(killTime, killTime*10, c) - wg.Done() - }() - - var once sync.Once - wg.Add(1) - go func() { - for i := 0; i < 8; i++ { - go runQuery(t, 0, true, &wg, &once) - } - time.Sleep(waitTime) - c <- os.Interrupt - time.Sleep(waitTime) - for i := 0; i < 8; i++ { - go runQuery(t, 0, true, &wg, &once) - } - wg.Done() - }() - - wg.Wait() - -} - -func TestGracefulRunDoesntTimeOut(t *testing.T) { - c := make(chan os.Signal, 1) - - var wg sync.WaitGroup - wg.Add(1) - - go func() { - runServer(0, killTime*2, c) - wg.Done() - }() - - wg.Add(1) - go launchTestQueries(t, &wg, c) - wg.Wait() -} - -func TestGracefulRunNoRequests(t *testing.T) { - c := make(chan os.Signal, 1) - - var wg sync.WaitGroup - wg.Add(1) - - go func() { - runServer(0, killTime*2, c) - wg.Done() - }() - - c <- os.Interrupt - - wg.Wait() - -} - -func TestGracefulForwardsConnState(t *testing.T) { - c := make(chan os.Signal, 1) - states := make(map[http.ConnState]int) - var stateLock sync.Mutex - - connState := func(conn net.Conn, state http.ConnState) { - stateLock.Lock() - states[state]++ - stateLock.Unlock() - } - - var wg sync.WaitGroup - wg.Add(1) - - expected := map[http.ConnState]int{ - http.StateNew: 8, - http.StateActive: 8, - http.StateClosed: 8, - } - - go func() { - server, l, _ := createListener(killTime / 2) - srv := &Server{ - ConnState: connState, - Timeout: killTime, - Server: server, - interrupt: c, - } - srv.Serve(l) - - wg.Done() - }() - - wg.Add(1) - go launchTestQueries(t, &wg, c) - wg.Wait() - - stateLock.Lock() - if !reflect.DeepEqual(states, expected) { - t.Errorf("Incorrect connection state tracking.\n actual: %v\nexpected: %v\n", states, expected) - } - stateLock.Unlock() -} - -func TestGracefulExplicitStop(t *testing.T) { - server, l, err := createListener(1 * time.Millisecond) - if err != nil { - t.Fatal(err) - } - - srv := &Server{Timeout: killTime, Server: server} - - go func() { - go srv.Serve(l) - time.Sleep(waitTime) - srv.Stop(killTime) - }() - - // block on the stopChan until the server has shut down - select { - case <-srv.StopChan(): - case <-time.After(timeoutTime): - t.Fatal("Timed out while waiting for explicit stop to complete") - } -} - -func TestGracefulExplicitStopOverride(t *testing.T) { - server, l, err := createListener(1 * time.Millisecond) - if err != nil { - t.Fatal(err) - } - - srv := &Server{Timeout: killTime, Server: server} - - go func() { - go srv.Serve(l) - time.Sleep(waitTime) - srv.Stop(killTime / 2) - }() - - // block on the stopChan until the server has shut down - select { - case <-srv.StopChan(): - case <-time.After(killTime): - t.Fatal("Timed out while waiting for explicit stop to complete") - } -} - -func TestShutdownInitiatedCallback(t *testing.T) { - server, l, err := createListener(1 * time.Millisecond) - if err != nil { - t.Fatal(err) - } - - called := make(chan struct{}) - cb := func() { close(called) } - - srv := &Server{Server: server, ShutdownInitiated: cb} - - go func() { - go srv.Serve(l) - time.Sleep(waitTime) - srv.Stop(killTime) - }() - - select { - case <-called: - case <-time.After(killTime): - t.Fatal("Timed out while waiting for ShutdownInitiated callback to be called") - } -} -func hijackingListener(srv *Server) (*http.Server, net.Listener, error) { - mux := http.NewServeMux() - mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { - conn, bufrw, err := rw.(http.Hijacker).Hijack() - if err != nil { - http.Error(rw, "webserver doesn't support hijacking", http.StatusInternalServerError) - return - } - - 
defer conn.Close() - - bufrw.WriteString("HTTP/1.1 200 OK\r\n\r\n") - bufrw.Flush() - }) - - server := &http.Server{Addr: ":9654", Handler: mux} - l, err := net.Listen("tcp", ":9654") - return server, l, err -} - -func TestNotifyClosed(t *testing.T) { - c := make(chan os.Signal, 1) - - var wg sync.WaitGroup - wg.Add(1) - - srv := &Server{Timeout: killTime, interrupt: c} - server, l, err := hijackingListener(srv) - if err != nil { - t.Fatal(err) - } - - srv.Server = server - - go func() { - srv.Serve(l) - wg.Done() - }() - - var once sync.Once - for i := 0; i < 8; i++ { - runQuery(t, http.StatusOK, false, &wg, &once) - } - - srv.Stop(0) - - // block on the stopChan until the server has shut down - select { - case <-srv.StopChan(): - case <-time.After(timeoutTime): - t.Fatal("Timed out while waiting for explicit stop to complete") - } - - if len(srv.connections) > 0 { - t.Fatal("hijacked connections should not be managed") - } - -} - -func TestStopDeadlock(t *testing.T) { - c := make(chan struct{}) - - server, l, err := createListener(1 * time.Millisecond) - if err != nil { - t.Fatal(err) - } - - srv := &Server{Server: server, NoSignalHandling: true} - - go func() { - time.Sleep(waitTime) - srv.Serve(l) - }() - - go func() { - srv.Stop(0) - close(c) - }() - - select { - case <-c: - l.Close() - case <-time.After(timeoutTime): - t.Fatal("Timed out while waiting for explicit stop to complete") - } -} - -// Run with --race -func TestStopRace(t *testing.T) { - server, l, err := createListener(1 * time.Millisecond) - if err != nil { - t.Fatal(err) - } - - srv := &Server{Timeout: killTime, Server: server} - - go func() { - go srv.Serve(l) - srv.Stop(killTime) - }() - srv.Stop(0) - select { - case <-srv.StopChan(): - case <-time.After(timeoutTime): - t.Fatal("Timed out while waiting for explicit stop to complete") - } -} diff --git a/Godeps/_workspace/src/github.com/tylerb/graceful/tests/main.go b/Godeps/_workspace/src/github.com/tylerb/graceful/tests/main.go deleted file mode 100644 index 8c8fa2046..000000000 --- a/Godeps/_workspace/src/github.com/tylerb/graceful/tests/main.go +++ /dev/null @@ -1,40 +0,0 @@ -package main - -import ( - "fmt" - "sync" - - "github.com/codegangsta/negroni" - "github.com/tylerb/graceful" -) - -func main() { - - var wg sync.WaitGroup - - wg.Add(3) - go func() { - n := negroni.New() - fmt.Println("Launching server on :3000") - graceful.Run(":3000", 0, n) - fmt.Println("Terminated server on :3000") - wg.Done() - }() - go func() { - n := negroni.New() - fmt.Println("Launching server on :3001") - graceful.Run(":3001", 0, n) - fmt.Println("Terminated server on :3001") - wg.Done() - }() - go func() { - n := negroni.New() - fmt.Println("Launching server on :3002") - graceful.Run(":3002", 0, n) - fmt.Println("Terminated server on :3002") - wg.Done() - }() - fmt.Println("Press ctrl+c. 
All servers should terminate.") - wg.Wait() - -} diff --git a/Godeps/_workspace/src/github.com/unrolled/render/.gitignore b/Godeps/_workspace/src/github.com/unrolled/render/.gitignore deleted file mode 100644 index 05f4eaf61..000000000 --- a/Godeps/_workspace/src/github.com/unrolled/render/.gitignore +++ /dev/null @@ -1,27 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - - -*.pem -.DS_Store diff --git a/Godeps/_workspace/src/github.com/unrolled/render/.travis.yml b/Godeps/_workspace/src/github.com/unrolled/render/.travis.yml deleted file mode 100644 index 83080ce76..000000000 --- a/Godeps/_workspace/src/github.com/unrolled/render/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - - 1.5 - - tip - -install: - - go get github.com/eknkc/amber - -script: - - go test -v ./... -tags=integration diff --git a/Godeps/_workspace/src/github.com/unrolled/render/LICENSE b/Godeps/_workspace/src/github.com/unrolled/render/LICENSE deleted file mode 100644 index 9c62063ec..000000000 --- a/Godeps/_workspace/src/github.com/unrolled/render/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Cory Jacobsen - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/unrolled/render/README.md b/Godeps/_workspace/src/github.com/unrolled/render/README.md deleted file mode 100644 index a33096a15..000000000 --- a/Godeps/_workspace/src/github.com/unrolled/render/README.md +++ /dev/null @@ -1,475 +0,0 @@ -# Render [![GoDoc](http://godoc.org/github.com/unrolled/render?status.svg)](http://godoc.org/github.com/unrolled/render) [![Build Status](https://travis-ci.org/unrolled/render.svg)](https://travis-ci.org/unrolled/render) - -Render is a package that provides functionality for easily rendering JSON, XML, text, binary data, and HTML templates. This package is based on the [Martini](https://github.com/go-martini/martini) [render](https://github.com/martini-contrib/render) work. - -## Usage -Render can be used with pretty much any web framework providing you can access the `http.ResponseWriter` from your handler. The rendering functions simply wraps Go's existing functionality for marshaling and rendering data. - -- HTML: Uses the [html/template](http://golang.org/pkg/html/template/) package to render HTML templates. 
-- JSON: Uses the [encoding/json](http://golang.org/pkg/encoding/json/) package to marshal data into a JSON-encoded response. -- XML: Uses the [encoding/xml](http://golang.org/pkg/encoding/xml/) package to marshal data into an XML-encoded response. -- Binary data: Passes the incoming data straight through to the `http.ResponseWriter`. -- Text: Passes the incoming string straight through to the `http.ResponseWriter`. - -~~~ go -// main.go -package main - -import ( - "encoding/xml" - "net/http" - - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -type ExampleXml struct { - XMLName xml.Name `xml:"example"` - One string `xml:"one,attr"` - Two string `xml:"two,attr"` -} - -func main() { - r := render.New() - mux := http.NewServeMux() - - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte("Welcome, visit sub pages now.")) - }) - - mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { - r.Data(w, http.StatusOK, []byte("Some binary data here.")) - }) - - mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { - r.Text(w, http.StatusOK, "Plain text here") - }) - - mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) - }) - - mux.HandleFunc("/jsonp", func(w http.ResponseWriter, req *http.Request) { - r.JSONP(w, http.StatusOK, "callbackName", map[string]string{"hello": "jsonp"}) - }) - - mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { - r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) - }) - - mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { - // Assumes you have a template in ./templates called "example.tmpl" - // $ mkdir -p templates && echo "

<h1>Hello {{.}}.</h1>

" > templates/example.tmpl - r.HTML(w, http.StatusOK, "example", nil) - }) - - http.ListenAndServe("127.0.0.1:3000", mux) -} -~~~ - -~~~ html - -

<h1>Hello {{.}}.</h1>

-~~~ - -### Available Options -Render comes with a variety of configuration options _(Note: these are not the default option values. See the defaults below.)_: - -~~~ go -// ... -r := render.New(render.Options{ - Directory: "templates", // Specify what path to load the templates from. - Asset: func(name string) ([]byte, error) { // Load from an Asset function instead of file. - return []byte("template content"), nil - }, - AssetNames: func() []string { // Return a list of asset names for the Asset function - return []string{"filename.tmpl"} - }, - Layout: "layout", // Specify a layout template. Layouts can call {{ yield }} to render the current template or {{ block "css" }} to render a block from the current template - Extensions: []string{".tmpl", ".html"}, // Specify extensions to load for templates. - Funcs: []template.FuncMap{AppHelpers}, // Specify helper function maps for templates to access. - Delims: render.Delims{"{[{", "}]}"}, // Sets delimiters to the specified strings. - Charset: "UTF-8", // Sets encoding for json and html content-types. Default is "UTF-8". - IndentJSON: true, // Output human readable JSON. - IndentXML: true, // Output human readable XML. - PrefixJSON: []byte(")]}',\n"), // Prefixes JSON responses with the given bytes. - PrefixXML: []byte(""), // Prefixes XML responses with the given bytes. - HTMLContentType: "application/xhtml+xml", // Output XHTML content type instead of default "text/html". - IsDevelopment: true, // Render will now recompile the templates on every HTML response. - UnEscapeHTML: true, // Replace ensure '&<>' are output correctly (JSON only). - StreamingJSON: true, // Streams the JSON response via json.Encoder. -}) -// ... -~~~ - -### Default Options -These are the preset options for Render: - -~~~ go -r := render.New() - -// Is the same as the default configuration options: - -r := render.New(render.Options{ - Directory: "templates", - Asset: nil, - AssetNames: nil, - Layout: "", - Extensions: []string{".tmpl"}, - Funcs: []template.FuncMap{}, - Delims: render.Delims{"{{", "}}"}, - Charset: "UTF-8", - IndentJSON: false, - IndentXML: false, - PrefixJSON: []byte(""), - PrefixXML: []byte(""), - HTMLContentType: "text/html", - IsDevelopment: false, - UnEscapeHTML: false, - StreamingJSON: false, -}) -~~~ - -### JSON vs Streaming JSON -By default, Render does **not** stream JSON to the `http.ResponseWriter`. It instead marshalls your object into a byte array, and if no errors occurred, writes that byte array to the `http.ResponseWriter`. This is ideal as you can catch errors before sending any data. - -If however you have the need to stream your JSON response (ie: dealing with massive objects), you can set the `StreamingJSON` option to true. This will use the `json.Encoder` to stream the output to the `http.ResponseWriter`. If an error occurs, you will receive the error in your code, but the response will have already been sent. Also note that streaming is only implemented in `render.JSON` and not `render.JSONP`, and the `UnEscapeHTML` and `Indent` options are ignored when streaming. - -### Loading Templates -By default Render will attempt to load templates with a '.tmpl' extension from the "templates" directory. Templates are found by traversing the templates directory and are named by path and basename. 
For instance, the following directory structure: - -~~~ -templates/ - | - |__ admin/ - | | - | |__ index.tmpl - | | - | |__ edit.tmpl - | - |__ home.tmpl -~~~ - -Will provide the following templates: -~~~ -admin/index -admin/edit -home -~~~ - -You can also load templates from memory by providing the Asset and AssetNames options, -e.g. when generating an asset file using [go-bindata](https://github.com/jteeuwen/go-bindata). - -### Layouts -Render provides `yield` and `block` functions for layouts to access: -~~~ go -// ... -r := render.New(render.Options{ - Layout: "layout", -}) -// ... -~~~ - -~~~ html - - - - My Layout - - {{ block "css" }} - - - - {{ block "header" }} - - {{ yield }} - - {{ block "footer" }} - - -~~~ - -`current` can also be called to get the current template being rendered. -~~~ html - - - - My Layout - - - This is the {{ current }} page. - - -~~~ - -### Character Encodings -Render will automatically set the proper Content-Type header based on which function you call. See below for an example of what the default settings would output (note that UTF-8 is the default, and binary data does not output the charset): -~~~ go -// main.go -package main - -import ( - "encoding/xml" - "net/http" - - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -type ExampleXml struct { - XMLName xml.Name `xml:"example"` - One string `xml:"one,attr"` - Two string `xml:"two,attr"` -} - -func main() { - r := render.New(render.Options{}) - mux := http.NewServeMux() - - // This will set the Content-Type header to "application/octet-stream". - // Note that this does not receive a charset value. - mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { - r.Data(w, http.StatusOK, []byte("Some binary data here.")) - }) - - // This will set the Content-Type header to "application/json; charset=UTF-8". - mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) - }) - - // This will set the Content-Type header to "text/xml; charset=UTF-8". - mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { - r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) - }) - - // This will set the Content-Type header to "text/plain; charset=UTF-8". - mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { - r.Text(w, http.StatusOK, "Plain text here") - }) - - // This will set the Content-Type header to "text/html; charset=UTF-8". - mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { - // Assumes you have a template in ./templates called "example.tmpl" - // $ mkdir -p templates && echo "

<h1>Hello {{.}}.</h1>

" > templates/example.tmpl - r.HTML(w, http.StatusOK, "example", nil) - }) - - http.ListenAndServe("127.0.0.1:3000", mux) -} -~~~ - -In order to change the charset, you can set the `Charset` within the `render.Options` to your encoding value: -~~~ go -// main.go -package main - -import ( - "encoding/xml" - "net/http" - - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -type ExampleXml struct { - XMLName xml.Name `xml:"example"` - One string `xml:"one,attr"` - Two string `xml:"two,attr"` -} - -func main() { - r := render.New(render.Options{ - Charset: "ISO-8859-1", - }) - mux := http.NewServeMux() - - // This will set the Content-Type header to "application/octet-stream". - // Note that this does not receive a charset value. - mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { - r.Data(w, http.StatusOK, []byte("Some binary data here.")) - }) - - // This will set the Content-Type header to "application/json; charset=ISO-8859-1". - mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) - }) - - // This will set the Content-Type header to "text/xml; charset=ISO-8859-1". - mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { - r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) - }) - - // This will set the Content-Type header to "text/plain; charset=ISO-8859-1". - mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { - r.Text(w, http.StatusOK, "Plain text here") - }) - - // This will set the Content-Type header to "text/html; charset=ISO-8859-1". - mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { - // Assumes you have a template in ./templates called "example.tmpl" - // $ mkdir -p templates && echo "

<h1>Hello {{.}}.</h1>

" > templates/example.tmpl - r.HTML(w, http.StatusOK, "example", nil) - }) - - http.ListenAndServe("127.0.0.1:3000", mux) -} -~~~ - -## Integration Examples - -### [Echo](https://github.com/labstack/echo) -~~~ go -// main.go -package main - -import ( - "net/http" - - "github.com/labstack/echo" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -func main() { - r := render.New(render.Options{ - IndentJSON: true, - }) - - e := echo.New() - - // Routes - e.Get("/", func(c *echo.Context) error { - r.JSON(c.Response().Writer(), http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) - return nil - }) - - e.Run(":3000") -} -~~~ - -### [Gin](https://github.com/gin-gonic/gin) -~~~ go -// main.go -package main - -import ( - "net/http" - - "github.com/gin-gonic/gin" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -func main() { - r := render.New(render.Options{ - IndentJSON: true, - }) - - router := gin.Default() - - router.GET("/", func(c *gin.Context) { - r.JSON(c.Writer, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) - }) - - router.Run(":3000") -} -~~~ - -### [Goji](https://github.com/zenazn/goji) -~~~ go -// main.go -package main - -import ( - "net/http" - - "github.com/zenazn/goji" - "github.com/zenazn/goji/web" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -func main() { - r := render.New(render.Options{ - IndentJSON: true, - }) - - goji.Get("/", func(c web.C, w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) - }) - goji.Serve() // Defaults to ":8000". -} -~~~ - -### [Negroni](https://github.com/codegangsta/negroni) -~~~ go -// main.go -package main - -import ( - "net/http" - - "github.com/codegangsta/negroni" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -func main() { - r := render.New(render.Options{ - IndentJSON: true, - }) - mux := http.NewServeMux() - - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) - }) - - n := negroni.Classic() - n.UseHandler(mux) - n.Run(":3000") -} -~~~ - -### [Traffic](https://github.com/pilu/traffic/) -~~~ go -// main.go -package main - -import ( - "net/http" - - "github.com/pilu/traffic" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -func main() { - r := render.New(render.Options{ - IndentJSON: true, - }) - - router := traffic.New() - router.Get("/", func(w traffic.ResponseWriter, req *traffic.Request) { - r.JSON(w, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) - }) - - router.Run() -} -~~~ - -### [Web.go](https://github.com/hoisie/web) -~~~ go -// main.go -package main - -import ( - "net/http" - - "github.com/hoisie/web" - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" -) - -func main() { - r := render.New(render.Options{ - IndentJSON: true, - }) - - web.Get("/(.*)", func(ctx *web.Context, val string) { - r.JSON(ctx, http.StatusOK, map[string]string{"welcome": "This is rendered JSON!"}) - }) - - web.Run(":3000") -} -~~~ diff --git a/Godeps/_workspace/src/github.com/unrolled/render/buffer.go b/Godeps/_workspace/src/github.com/unrolled/render/buffer.go deleted file mode 100644 index 29dac7361..000000000 --- a/Godeps/_workspace/src/github.com/unrolled/render/buffer.go +++ /dev/null @@ -1,41 +0,0 @@ -package render - -import "bytes" - -// bufPool represents a reusable buffer pool for 
executing templates into. -var bufPool *BufferPool - -// BufferPool implements a pool of bytes.Buffers in the form of a bounded channel. -// Pulled from the github.com/oxtoacart/bpool package (Apache licensed). -type BufferPool struct { - c chan *bytes.Buffer -} - -// NewBufferPool creates a new BufferPool bounded to the given size. -func NewBufferPool(size int) (bp *BufferPool) { - return &BufferPool{ - c: make(chan *bytes.Buffer, size), - } -} - -// Get gets a Buffer from the BufferPool, or creates a new one if none are -// available in the pool. -func (bp *BufferPool) Get() (b *bytes.Buffer) { - select { - case b = <-bp.c: - // reuse existing buffer - default: - // create new buffer - b = bytes.NewBuffer([]byte{}) - } - return -} - -// Put returns the given Buffer to the BufferPool. -func (bp *BufferPool) Put(b *bytes.Buffer) { - b.Reset() - select { - case bp.c <- b: - default: // Discard the buffer if the pool is full. - } -} diff --git a/Godeps/_workspace/src/github.com/unrolled/render/doc.go b/Godeps/_workspace/src/github.com/unrolled/render/doc.go deleted file mode 100644 index 2960bf3cf..000000000 --- a/Godeps/_workspace/src/github.com/unrolled/render/doc.go +++ /dev/null @@ -1,55 +0,0 @@ -/*Package render is a package that provides functionality for easily rendering JSON, XML, binary data, and HTML templates. - - package main - - import ( - "encoding/xml" - "net/http" - - "github.com/unrolled/render" // or "gopkg.in/unrolled/render.v1" - ) - - type ExampleXml struct { - XMLName xml.Name `xml:"example"` - One string `xml:"one,attr"` - Two string `xml:"two,attr"` - } - - func main() { - r := render.New() - mux := http.NewServeMux() - - mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - w.Write([]byte("Welcome, visit sub pages now.")) - }) - - mux.HandleFunc("/data", func(w http.ResponseWriter, req *http.Request) { - r.Data(w, http.StatusOK, []byte("Some binary data here.")) - }) - - mux.HandleFunc("/text", func(w http.ResponseWriter, req *http.Request) { - r.Text(w, http.StatusOK, "Plain text here") - }) - - mux.HandleFunc("/json", func(w http.ResponseWriter, req *http.Request) { - r.JSON(w, http.StatusOK, map[string]string{"hello": "json"}) - }) - - mux.HandleFunc("/jsonp", func(w http.ResponseWriter, req *http.Request) { - r.JSONP(w, http.StatusOK, "callbackName", map[string]string{"hello": "jsonp"}) - }) - - mux.HandleFunc("/xml", func(w http.ResponseWriter, req *http.Request) { - r.XML(w, http.StatusOK, ExampleXml{One: "hello", Two: "xml"}) - }) - - mux.HandleFunc("/html", func(w http.ResponseWriter, req *http.Request) { - // Assumes you have a template in ./templates called "example.tmpl" - // $ mkdir -p templates && echo "

<h1>Hello HTML world.</h1>

" > templates/example.tmpl - r.HTML(w, http.StatusOK, "example", nil) - }) - - http.ListenAndServe("0.0.0.0:3000", mux) - } -*/ -package render diff --git a/Godeps/_workspace/src/github.com/unrolled/render/engine.go b/Godeps/_workspace/src/github.com/unrolled/render/engine.go deleted file mode 100644 index 92da1399e..000000000 --- a/Godeps/_workspace/src/github.com/unrolled/render/engine.go +++ /dev/null @@ -1,202 +0,0 @@ -package render - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "html/template" - "net/http" -) - -// Engine is the generic interface for all responses. -type Engine interface { - Render(http.ResponseWriter, interface{}) error -} - -// Head defines the basic ContentType and Status fields. -type Head struct { - ContentType string - Status int -} - -// Data built-in renderer. -type Data struct { - Head -} - -// HTML built-in renderer. -type HTML struct { - Head - Name string - Templates *template.Template -} - -// JSON built-in renderer. -type JSON struct { - Head - Indent bool - UnEscapeHTML bool - Prefix []byte - StreamingJSON bool -} - -// JSONP built-in renderer. -type JSONP struct { - Head - Indent bool - Callback string -} - -// Text built-in renderer. -type Text struct { - Head -} - -// XML built-in renderer. -type XML struct { - Head - Indent bool - Prefix []byte -} - -// Write outputs the header content. -func (h Head) Write(w http.ResponseWriter) { - w.Header().Set(ContentType, h.ContentType) - w.WriteHeader(h.Status) -} - -// Render a data response. -func (d Data) Render(w http.ResponseWriter, v interface{}) error { - c := w.Header().Get(ContentType) - if c != "" { - d.Head.ContentType = c - } - - d.Head.Write(w) - w.Write(v.([]byte)) - return nil -} - -// Render a HTML response. -func (h HTML) Render(w http.ResponseWriter, binding interface{}) error { - // Retrieve a buffer from the pool to write to. - out := bufPool.Get() - err := h.Templates.ExecuteTemplate(out, h.Name, binding) - if err != nil { - return err - } - - h.Head.Write(w) - out.WriteTo(w) - - // Return the buffer to the pool. - bufPool.Put(out) - return nil -} - -// Render a JSON response. -func (j JSON) Render(w http.ResponseWriter, v interface{}) error { - if j.StreamingJSON { - return j.renderStreamingJSON(w, v) - } - - var result []byte - var err error - - if j.Indent { - result, err = json.MarshalIndent(v, "", " ") - result = append(result, '\n') - } else { - result, err = json.Marshal(v) - } - if err != nil { - return err - } - - // Unescape HTML if needed. - if j.UnEscapeHTML { - result = bytes.Replace(result, []byte("\\u003c"), []byte("<"), -1) - result = bytes.Replace(result, []byte("\\u003e"), []byte(">"), -1) - result = bytes.Replace(result, []byte("\\u0026"), []byte("&"), -1) - } - - // JSON marshaled fine, write out the result. - j.Head.Write(w) - if len(j.Prefix) > 0 { - w.Write(j.Prefix) - } - w.Write(result) - return nil -} - -func (j JSON) renderStreamingJSON(w http.ResponseWriter, v interface{}) error { - j.Head.Write(w) - if len(j.Prefix) > 0 { - w.Write(j.Prefix) - } - - return json.NewEncoder(w).Encode(v) -} - -// Render a JSONP response. -func (j JSONP) Render(w http.ResponseWriter, v interface{}) error { - var result []byte - var err error - - if j.Indent { - result, err = json.MarshalIndent(v, "", " ") - } else { - result, err = json.Marshal(v) - } - if err != nil { - return err - } - - // JSON marshaled fine, write out the result. - j.Head.Write(w) - w.Write([]byte(j.Callback + "(")) - w.Write(result) - w.Write([]byte(");")) - - // If indenting, append a new line. 
- if j.Indent { - w.Write([]byte("\n")) - } - return nil -} - -// Render a text response. -func (t Text) Render(w http.ResponseWriter, v interface{}) error { - c := w.Header().Get(ContentType) - if c != "" { - t.Head.ContentType = c - } - - t.Head.Write(w) - w.Write([]byte(v.(string))) - return nil -} - -// Render an XML response. -func (x XML) Render(w http.ResponseWriter, v interface{}) error { - var result []byte - var err error - - if x.Indent { - result, err = xml.MarshalIndent(v, "", " ") - result = append(result, '\n') - } else { - result, err = xml.Marshal(v) - } - if err != nil { - return err - } - - // XML marshaled fine, write out the result. - x.Head.Write(w) - if len(x.Prefix) > 0 { - w.Write(x.Prefix) - } - w.Write(result) - return nil -} diff --git a/Godeps/_workspace/src/github.com/unrolled/render/engine_integration_test.go b/Godeps/_workspace/src/github.com/unrolled/render/engine_integration_test.go deleted file mode 100644 index f280d2f0e..000000000 --- a/Godeps/_workspace/src/github.com/unrolled/render/engine_integration_test.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build integration - -package render - -import ( - "html/template" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/eknkc/amber" -) - -// go test -tags=integration - -type Amber struct { - Head - Template *template.Template -} - -func (a Amber) Render(w http.ResponseWriter, v interface{}) error { - a.Head.Write(w) - return a.Template.Execute(w, v) -} - -func TestRenderAmberTemplate(t *testing.T) { - dir := "fixtures/amber/" - render := New(Options{}) - - templates, err := amber.CompileDir(dir, amber.DefaultDirOptions, amber.DefaultOptions) - if err != nil { - t.Errorf("Could not compile Amber templates at " + dir) - } - - a := Amber{ - Head: Head{ - ContentType: ContentHTML, - Status: http.StatusOK, - }, - Template: templates["example"], - } - - v := struct { - VarOne string - VarTwo string - }{ - "Contact", - "Content!", - } - - h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - render.Render(w, a, v) - }) - - res := httptest.NewRecorder() - req, _ := http.NewRequest("GET", "/foo", nil) - h.ServeHTTP(res, req) - - body := res.Body.String() - - checkCompile := strings.Index(body, `