add GDrive provider support (#118)
* GDrive provider support * More reliable basedir ownership * Fix mimetype
This commit is contained in:
committed by
Remco Verhoef
parent
d0c7241b31
commit
82493d6dcb
46
vendor/google.golang.org/api/gensupport/backoff.go
generated
vendored
Normal file
46
vendor/google.golang.org/api/gensupport/backoff.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackoffStrategy abstracts a retry-pause policy so callers can plug in
// alternatives to ExponentialBackoff.
type BackoffStrategy interface {
	// Pause returns the duration of the next pause and true if the operation should be
	// retried, or false if no further retries should be attempted.
	Pause() (time.Duration, bool)

	// Reset restores the strategy to its initial state.
	Reset()
}
|
||||
|
||||
// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff.
// The initial pause time is given by Base.
// Once the total pause time exceeds Max, Pause will indicate no further retries.
type ExponentialBackoff struct {
	Base  time.Duration // upper bound of the first pause; doubles each retry
	Max   time.Duration // once total pause time exceeds Max, Pause reports no retry
	total time.Duration // cumulative pause time handed out so far
	n     uint          // number of pauses issued since the last Reset
}

// Pause returns the duration of the next pause and true if the operation
// should be retried, or false if no further retries should be attempted.
func (eb *ExponentialBackoff) Pause() (time.Duration, bool) {
	if eb.total > eb.Max {
		return 0, false
	}

	// The next pause is selected randomly from [0, 2^n * Base).
	// For large n, (1 << n) * Base overflows int64 (or the shift itself
	// becomes zero), which would make the rand.Int63n argument
	// non-positive and panic. Clamp the bound in that case; the loop
	// still terminates because total keeps growing toward Max.
	bound := (int64(1) << eb.n) * int64(eb.Base)
	if bound <= 0 {
		bound = int64(1) << 62
	}
	d := time.Duration(rand.Int63n(bound))
	eb.total += d
	eb.n++
	return d, true
}

// Reset restores the strategy to its initial state.
func (eb *ExponentialBackoff) Reset() {
	eb.n = 0
	eb.total = 0
}
|
46
vendor/google.golang.org/api/gensupport/backoff_test.go
generated
vendored
Normal file
46
vendor/google.golang.org/api/gensupport/backoff_test.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestBackoff(t *testing.T) {
|
||||
eb := &ExponentialBackoff{Base: time.Millisecond, Max: time.Second}
|
||||
|
||||
var total time.Duration
|
||||
for n, max := 0, 2*time.Millisecond; ; n, max = n+1, max*2 {
|
||||
if n > 100 {
|
||||
// There's less than 1 in 10^28 of taking longer than 100 iterations,
|
||||
// so this is just to check we don't have an infinite loop.
|
||||
t.Fatalf("Failed to timeout after 100 iterations.")
|
||||
}
|
||||
pause, retry := eb.Pause()
|
||||
if !retry {
|
||||
break
|
||||
}
|
||||
|
||||
if 0 > pause || pause >= max {
|
||||
t.Errorf("Iteration %d: pause = %v; want in range [0, %v)", n, pause, max)
|
||||
}
|
||||
total += pause
|
||||
}
|
||||
|
||||
if total < time.Second {
|
||||
t.Errorf("Total time = %v; want > %v", total, time.Second)
|
||||
}
|
||||
}
|
77
vendor/google.golang.org/api/gensupport/buffer.go
generated
vendored
Normal file
77
vendor/google.golang.org/api/gensupport/buffer.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// MediaBuffer buffers data from an io.Reader to support uploading media in retryable chunks.
type MediaBuffer struct {
	media io.Reader

	chunk []byte // The current chunk which is pending upload. The capacity is the chunk size.
	err   error  // Any error generated when populating chunk by reading media.

	// The absolute position of chunk in the underlying media.
	off int64
}

// NewMediaBuffer returns a MediaBuffer that reads media in chunks of
// chunkSize bytes.
func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer {
	return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
}

// Chunk returns the current buffered chunk, the offset in the underlying media
// from which the chunk is drawn, and the size of the chunk.
// Successive calls to Chunk return the same chunk between calls to Next.
func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
	// There may already be data in chunk if Next has not been called since the previous call to Chunk.
	if len(mb.chunk) == 0 && mb.err == nil {
		mb.err = mb.loadChunk()
	}
	return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err
}

// loadChunk will read from media into chunk, up to the capacity of chunk.
func (mb *MediaBuffer) loadChunk() error {
	limit := cap(mb.chunk)
	mb.chunk = mb.chunk[:limit]

	var (
		filled int
		err    error
	)
	// Keep reading until the chunk is full or the reader reports an error
	// (including io.EOF).
	for filled < limit && err == nil {
		var n int
		n, err = mb.media.Read(mb.chunk[filled:])
		filled += n
	}
	mb.chunk = mb.chunk[:filled]
	return err
}

// Next advances to the next chunk, which will be returned by the next call to Chunk.
// Calls to Next without a corresponding prior call to Chunk will have no effect.
func (mb *MediaBuffer) Next() {
	mb.off += int64(len(mb.chunk))
	mb.chunk = mb.chunk[:0]
}
|
||||
|
||||
type readerTyper struct {
|
||||
io.Reader
|
||||
googleapi.ContentTyper
|
||||
}
|
||||
|
||||
// ReaderAtToReader adapts a ReaderAt to be used as a Reader.
|
||||
// If ra implements googleapi.ContentTyper, then the returned reader
|
||||
// will also implement googleapi.ContentTyper, delegating to ra.
|
||||
func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader {
|
||||
r := io.NewSectionReader(ra, 0, size)
|
||||
if typer, ok := ra.(googleapi.ContentTyper); ok {
|
||||
return readerTyper{r, typer}
|
||||
}
|
||||
return r
|
||||
}
|
296
vendor/google.golang.org/api/gensupport/buffer_test.go
generated
vendored
Normal file
296
vendor/google.golang.org/api/gensupport/buffer_test.go
generated
vendored
Normal file
@@ -0,0 +1,296 @@
|
||||
// Copyright 2015 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
"testing"
|
||||
"testing/iotest"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// getChunkAsString reads a chunk from mb, but does not call Next.
|
||||
func getChunkAsString(t *testing.T, mb *MediaBuffer) (string, error) {
|
||||
chunk, _, size, err := mb.Chunk()
|
||||
|
||||
buf, e := ioutil.ReadAll(chunk)
|
||||
if e != nil {
|
||||
t.Fatalf("Failed reading chunk: %v", e)
|
||||
}
|
||||
if size != len(buf) {
|
||||
t.Fatalf("reported chunk size doesn't match actual chunk size: got: %v; want: %v", size, len(buf))
|
||||
}
|
||||
return string(buf), err
|
||||
}
|
||||
|
||||
// TestChunking feeds a fixed string through MediaBuffer at several chunk
// sizes and checks the chunk boundaries, the terminal error, and that the
// behavior is identical when the underlying reader returns one byte at a
// time (iotest.OneByteReader).
func TestChunking(t *testing.T) {
	type testCase struct {
		data       string // the data to read from the Reader
		finalErr   error  // error to return after data has been read
		chunkSize  int
		wantChunks []string
	}

	for _, singleByteReads := range []bool{true, false} {
		for _, tc := range []testCase{
			{
				data:       "abcdefg",
				finalErr:   nil,
				chunkSize:  3,
				wantChunks: []string{"abc", "def", "g"},
			},
			{
				data:       "abcdefg",
				finalErr:   nil,
				chunkSize:  1,
				wantChunks: []string{"a", "b", "c", "d", "e", "f", "g"},
			},
			{
				data:       "abcdefg",
				finalErr:   nil,
				chunkSize:  7,
				wantChunks: []string{"abcdefg"},
			},
			{
				data:       "abcdefg",
				finalErr:   nil,
				chunkSize:  8,
				wantChunks: []string{"abcdefg"},
			},
			{
				data:       "abcdefg",
				finalErr:   io.ErrUnexpectedEOF,
				chunkSize:  3,
				wantChunks: []string{"abc", "def", "g"},
			},
			{
				data:       "abcdefg",
				finalErr:   io.ErrUnexpectedEOF,
				chunkSize:  8,
				wantChunks: []string{"abcdefg"},
			},
		} {
			var r io.Reader = &errReader{buf: []byte(tc.data), err: tc.finalErr}

			if singleByteReads {
				r = iotest.OneByteReader(r)
			}

			mb := NewMediaBuffer(r, tc.chunkSize)
			var gotErr error
			got := []string{}
			// Drain the buffer chunk by chunk until an error (io.EOF on
			// clean end, or the injected finalErr) is surfaced.
			for {
				chunk, err := getChunkAsString(t, mb)
				if len(chunk) != 0 {
					got = append(got, string(chunk))
				}
				if err != nil {
					gotErr = err
					break
				}
				mb.Next()
			}

			if !reflect.DeepEqual(got, tc.wantChunks) {
				t.Errorf("Failed reading buffer: got: %v; want:%v", got, tc.wantChunks)
			}

			// With no injected error, a clean read must end in io.EOF.
			expectedErr := tc.finalErr
			if expectedErr == nil {
				expectedErr = io.EOF
			}
			if gotErr != expectedErr {
				t.Errorf("Reading buffer error: got: %v; want: %v", gotErr, expectedErr)
			}
		}
	}
}
|
||||
|
||||
func TestChunkCanBeReused(t *testing.T) {
|
||||
er := &errReader{buf: []byte("abcdefg")}
|
||||
mb := NewMediaBuffer(er, 3)
|
||||
|
||||
// expectChunk reads a chunk and checks that it got what was wanted.
|
||||
expectChunk := func(want string, wantErr error) {
|
||||
got, err := getChunkAsString(t, mb)
|
||||
if err != wantErr {
|
||||
t.Errorf("error reading buffer: got: %v; want: %v", err, wantErr)
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("Failed reading buffer: got: %q; want:%q", got, want)
|
||||
}
|
||||
}
|
||||
expectChunk("abc", nil)
|
||||
// On second call, should get same chunk again.
|
||||
expectChunk("abc", nil)
|
||||
mb.Next()
|
||||
expectChunk("def", nil)
|
||||
expectChunk("def", nil)
|
||||
mb.Next()
|
||||
expectChunk("g", io.EOF)
|
||||
expectChunk("g", io.EOF)
|
||||
mb.Next()
|
||||
expectChunk("", io.EOF)
|
||||
}
|
||||
|
||||
func TestPos(t *testing.T) {
|
||||
er := &errReader{buf: []byte("abcdefg")}
|
||||
mb := NewMediaBuffer(er, 3)
|
||||
|
||||
expectChunkAtOffset := func(want int64, wantErr error) {
|
||||
_, off, _, err := mb.Chunk()
|
||||
if err != wantErr {
|
||||
t.Errorf("error reading buffer: got: %v; want: %v", err, wantErr)
|
||||
}
|
||||
if got := off; got != want {
|
||||
t.Errorf("resumable buffer Pos: got: %v; want: %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
// We expect the first chunk to be at offset 0.
|
||||
expectChunkAtOffset(0, nil)
|
||||
// Fetching the same chunk should return the same offset.
|
||||
expectChunkAtOffset(0, nil)
|
||||
|
||||
// Calling Next multiple times should only cause off to advance by 3, since off is not advanced until
|
||||
// the chunk is actually read.
|
||||
mb.Next()
|
||||
mb.Next()
|
||||
expectChunkAtOffset(3, nil)
|
||||
|
||||
mb.Next()
|
||||
|
||||
// Load the final 1-byte chunk.
|
||||
expectChunkAtOffset(6, io.EOF)
|
||||
|
||||
// Next will advance 1 byte. But there are no more chunks, so off will not increase beyond 7.
|
||||
mb.Next()
|
||||
expectChunkAtOffset(7, io.EOF)
|
||||
mb.Next()
|
||||
expectChunkAtOffset(7, io.EOF)
|
||||
}
|
||||
|
||||
// bytes.Reader implements both Reader and ReaderAt. The following types
|
||||
// implement various combinations of Reader, ReaderAt and ContentTyper, by
|
||||
// wrapping bytes.Reader. All implement at least ReaderAt, so they can be
|
||||
// passed to ReaderAtToReader. The following table summarizes which types
|
||||
// implement which interfaces:
|
||||
//
|
||||
// ReaderAt Reader ContentTyper
|
||||
// reader x x
|
||||
// typerReader x x x
|
||||
// readerAt x
|
||||
// typerReaderAt x x
|
||||
|
||||
// reader implements Reader, in addition to ReaderAt.
type reader struct {
	r *bytes.Reader
}

// ReadAt delegates to the wrapped bytes.Reader.
func (r *reader) ReadAt(p []byte, off int64) (int, error) {
	return r.r.ReadAt(p, off)
}

// Read delegates to the wrapped bytes.Reader.
func (r *reader) Read(p []byte) (int, error) {
	return r.r.Read(p)
}
|
||||
|
||||
// typerReader implements Reader and ContentTyper, in addition to ReaderAt.
type typerReader struct {
	r *bytes.Reader
}

// ReadAt delegates to the wrapped bytes.Reader.
func (tr *typerReader) ReadAt(p []byte, off int64) (int, error) {
	return tr.r.ReadAt(p, off)
}

// Read delegates to the wrapped bytes.Reader.
func (tr *typerReader) Read(p []byte) (int, error) {
	return tr.r.Read(p)
}

// ContentType reports a fixed type so tests can verify delegation.
func (tr *typerReader) ContentType() string {
	return "ctype"
}
|
||||
|
||||
// readerAt implements only ReaderAt.
type readerAt struct {
	r *bytes.Reader
}

// ReadAt delegates to the wrapped bytes.Reader.
func (ra *readerAt) ReadAt(p []byte, off int64) (int, error) {
	return ra.r.ReadAt(p, off)
}
|
||||
|
||||
// typerReaderAt implements ContentTyper, in addition to ReaderAt.
type typerReaderAt struct {
	r *bytes.Reader
}

// ReadAt delegates to the wrapped bytes.Reader.
func (tra *typerReaderAt) ReadAt(p []byte, off int64) (int, error) {
	return tra.r.ReadAt(p, off)
}

// ContentType reports a fixed type so tests can verify delegation.
func (tra *typerReaderAt) ContentType() string {
	return "ctype"
}
|
||||
|
||||
// TestAdapter checks ReaderAtToReader against the four wrapper types above:
// the returned reader must expose ContentTyper exactly when the source
// does, and must always read the full data from the beginning — even when
// the same ReaderAt is adapted twice.
func TestAdapter(t *testing.T) {
	data := "abc"

	// checkConversion asserts the ContentTyper presence/value and that the
	// adapted reader yields the full data.
	checkConversion := func(to io.Reader, wantTyper bool) {
		if _, ok := to.(googleapi.ContentTyper); ok != wantTyper {
			t.Errorf("reader implements typer? got: %v; want: %v", ok, wantTyper)
		}
		if typer, ok := to.(googleapi.ContentTyper); ok && typer.ContentType() != "ctype" {
			t.Errorf("content type: got: %s; want: ctype", typer.ContentType())
		}
		buf, err := ioutil.ReadAll(to)
		if err != nil {
			t.Errorf("error reading data: %v", err)
			return
		}
		if !bytes.Equal(buf, []byte(data)) {
			t.Errorf("failed reading data: got: %s; want: %s", buf, data)
		}
	}

	type testCase struct {
		from      io.ReaderAt
		wantTyper bool
	}
	for _, tc := range []testCase{
		{
			from:      &reader{bytes.NewReader([]byte(data))},
			wantTyper: false,
		},
		{
			// Reader and ContentTyper
			from:      &typerReader{bytes.NewReader([]byte(data))},
			wantTyper: true,
		},
		{
			// ReaderAt
			from:      &readerAt{bytes.NewReader([]byte(data))},
			wantTyper: false,
		},
		{
			// ReaderAt and ContentTyper
			from:      &typerReaderAt{bytes.NewReader([]byte(data))},
			wantTyper: true,
		},
	} {
		to := ReaderAtToReader(tc.from, int64(len(data)))
		checkConversion(to, tc.wantTyper)
		// tc.from is a ReaderAt, and should be treated like one, even
		// if it also implements Reader. Specifically, it can be
		// reused and read from the beginning.
		to = ReaderAtToReader(tc.from, int64(len(data)))
		checkConversion(to, tc.wantTyper)
	}
}
|
10
vendor/google.golang.org/api/gensupport/doc.go
generated
vendored
Normal file
10
vendor/google.golang.org/api/gensupport/doc.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gensupport is an internal implementation detail used by code
|
||||
// generated by the google-api-go-generator tool.
|
||||
//
|
||||
// This package may be modified at any time without regard for backwards
|
||||
// compatibility. It should not be used directly by API users.
|
||||
package gensupport
|
17
vendor/google.golang.org/api/gensupport/go18.go
generated
vendored
Normal file
17
vendor/google.golang.org/api/gensupport/go18.go
generated
vendored
Normal file
@@ -0,0 +1,17 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// SetGetBody sets the GetBody field of req to f.
// GetBody lets the HTTP transport obtain a fresh copy of the request body
// when a request must be retried or redirected (field added in Go 1.8,
// hence the build tag on this file).
func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {
	req.GetBody = f
}
|
22
vendor/google.golang.org/api/gensupport/header.go
generated
vendored
Normal file
22
vendor/google.golang.org/api/gensupport/header.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GoogleClientHeader returns the value to use for the x-goog-api-client
// header, which is used internally by Google.
func GoogleClientHeader(generatorVersion, clientElement string) string {
	parts := make([]string, 0, 3)
	// Go runtime versions may contain spaces (e.g. devel builds); the
	// header element must not, so replace them with underscores.
	parts = append(parts, "gl-go/"+strings.Replace(runtime.Version(), " ", "_", -1))
	if clientElement != "" {
		parts = append(parts, clientElement)
	}
	parts = append(parts, fmt.Sprintf("gdcl/%s", generatorVersion))
	return strings.Join(parts, " ")
}
|
28
vendor/google.golang.org/api/gensupport/header_test.go
generated
vendored
Normal file
28
vendor/google.golang.org/api/gensupport/header_test.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGoogleClientHeader(t *testing.T) {
|
||||
const genVersion = "20170101"
|
||||
gv := strings.Replace(runtime.Version(), " ", "_", -1)
|
||||
got := GoogleClientHeader(genVersion, "gccl/xyz")
|
||||
want := fmt.Sprintf("gl-go/%s gccl/xyz gdcl/%s", gv, genVersion)
|
||||
if got != want {
|
||||
t.Errorf("got %q, want %q", got, want)
|
||||
}
|
||||
|
||||
got = GoogleClientHeader(genVersion, "")
|
||||
want = fmt.Sprintf("gl-go/%s gdcl/%s", gv, genVersion)
|
||||
if got != want {
|
||||
t.Errorf("got %q, want %q", got, want)
|
||||
}
|
||||
}
|
211
vendor/google.golang.org/api/gensupport/json.go
generated
vendored
Normal file
211
vendor/google.golang.org/api/gensupport/json.go
generated
vendored
Normal file
@@ -0,0 +1,211 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MarshalJSON returns a JSON encoding of schema containing only selected fields.
|
||||
// A field is selected if any of the following is true:
|
||||
// * it has a non-empty value
|
||||
// * its field name is present in forceSendFields and it is not a nil pointer or nil interface
|
||||
// * its field name is present in nullFields.
|
||||
// The JSON key for each selected field is taken from the field's json: struct tag.
|
||||
func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) {
|
||||
if len(forceSendFields) == 0 && len(nullFields) == 0 {
|
||||
return json.Marshal(schema)
|
||||
}
|
||||
|
||||
mustInclude := make(map[string]bool)
|
||||
for _, f := range forceSendFields {
|
||||
mustInclude[f] = true
|
||||
}
|
||||
useNull := make(map[string]bool)
|
||||
useNullMaps := make(map[string]map[string]bool)
|
||||
for _, nf := range nullFields {
|
||||
parts := strings.SplitN(nf, ".", 2)
|
||||
field := parts[0]
|
||||
if len(parts) == 1 {
|
||||
useNull[field] = true
|
||||
} else {
|
||||
if useNullMaps[field] == nil {
|
||||
useNullMaps[field] = map[string]bool{}
|
||||
}
|
||||
useNullMaps[field][parts[1]] = true
|
||||
}
|
||||
}
|
||||
|
||||
dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(dataMap)
|
||||
}
|
||||
|
||||
// schemaToMap converts a schema struct into a map keyed by each field's
// JSON name, applying the force-send (mustInclude), whole-field null
// (useNull), and per-map-key null (useNullMaps) overrides computed by
// MarshalJSON. Fields without a json tag, or tagged "-", are skipped.
func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) {
	m := make(map[string]interface{})
	s := reflect.ValueOf(schema)
	st := s.Type()

	for i := 0; i < s.NumField(); i++ {
		jsonTag := st.Field(i).Tag.Get("json")
		if jsonTag == "" {
			continue
		}
		tag, err := parseJSONTag(jsonTag)
		if err != nil {
			return nil, err
		}
		if tag.ignore {
			continue
		}

		v := s.Field(i)
		f := st.Field(i)

		// A field marked null must be empty in the struct; emit an
		// explicit JSON null for it.
		if useNull[f.Name] {
			if !isEmptyValue(v) {
				return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name)
			}
			m[tag.apiName] = nil
			continue
		}

		if !includeField(v, f, mustInclude) {
			continue
		}

		// If map fields are explicitly set to null, use a map[string]interface{}.
		if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil {
			ms, ok := v.Interface().(map[string]string)
			if !ok {
				return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name)
			}
			mi := map[string]interface{}{}
			for k, v := range ms {
				mi[k] = v
			}
			for k := range useNullMaps[f.Name] {
				mi[k] = nil
			}
			m[tag.apiName] = mi
			continue
		}

		// nil maps are treated as empty maps.
		if f.Type.Kind() == reflect.Map && v.IsNil() {
			m[tag.apiName] = map[string]string{}
			continue
		}

		// nil slices are treated as empty slices.
		if f.Type.Kind() == reflect.Slice && v.IsNil() {
			m[tag.apiName] = []bool{}
			continue
		}

		if tag.stringFormat {
			m[tag.apiName] = formatAsString(v, f.Type.Kind())
		} else {
			m[tag.apiName] = v.Interface()
		}
	}
	return m, nil
}
|
||||
|
||||
// formatAsString returns a string representation of v, dereferencing it first if possible.
func formatAsString(v reflect.Value, kind reflect.Kind) string {
	deref := v
	if kind == reflect.Ptr && !v.IsNil() {
		deref = v.Elem()
	}
	return fmt.Sprintf("%v", deref.Interface())
}
|
||||
|
||||
// jsonTag represents a restricted version of the struct tag format used by encoding/json.
// It is used to describe the JSON encoding of fields in a Schema struct.
type jsonTag struct {
	apiName      string // JSON key for the field
	stringFormat bool   // encode numeric value as a JSON string
	ignore       bool   // field tagged "-": never emit
}

// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
// The format of the tag must match that generated by the Schema.writeSchemaStruct method
// in the api generator.
func parseJSONTag(val string) (jsonTag, error) {
	if val == "-" {
		return jsonTag{ignore: true}, nil
	}

	comma := strings.Index(val, ",")
	if comma <= 0 {
		// No options, or an empty name, is malformed.
		return jsonTag{}, fmt.Errorf("malformed json tag: %s", val)
	}

	tag := jsonTag{apiName: val[:comma]}
	switch val[comma+1:] {
	case "omitempty":
		// Plain omitempty: nothing further to record.
	case "omitempty,string":
		tag.stringFormat = true
	default:
		return tag, fmt.Errorf("malformed json tag: %s", val)
	}

	return tag, nil
}
|
||||
|
||||
// includeField reports whether the struct field "f" with value "v" should
// be included in JSON output.
func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool {
	// The regular JSON encoding of a nil pointer is "null", which means "delete this field".
	// Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
	// However, many fields are not pointers, so there would be no way to delete these fields.
	// Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
	// Deletion will be handled by a separate mechanism.
	//
	// Similarly, the "any" type is represented as an interface{}; a nil
	// interface has no reasonable representation to send, so it is also
	// always excluded.
	kind := f.Type.Kind()
	if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
		return false
	}

	return mustInclude[f.Name] || !isEmptyValue(v)
}

// isEmptyValue reports whether v is the empty value for its type. This
// implementation is based on that of the encoding/json package, but its
// correctness does not depend on it being identical. What's important is that
// this function return false in situations where v should not be sent as part
// of a PATCH operation.
func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Bool:
		return !v.Bool()
	case reflect.String, reflect.Array, reflect.Slice, reflect.Map:
		return v.Len() == 0
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	}
	return false
}
|
516
vendor/google.golang.org/api/gensupport/json_test.go
generated
vendored
Normal file
516
vendor/google.golang.org/api/gensupport/json_test.go
generated
vendored
Normal file
@@ -0,0 +1,516 @@
|
||||
// Copyright 2015 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// schema exercises the field kinds MarshalJSON must handle: basic values,
// pointers to basic values, string-formatted ints, slices, maps, an "any"
// field, a nested struct, and the ForceSendFields/NullFields control
// slices (both excluded from plain encoding via `json:"-"`).
type schema struct {
	// Basic types
	B    bool    `json:"b,omitempty"`
	F    float64 `json:"f,omitempty"`
	I    int64   `json:"i,omitempty"`
	Istr int64   `json:"istr,omitempty,string"`
	Str  string  `json:"str,omitempty"`

	// Pointers to basic types
	PB    *bool    `json:"pb,omitempty"`
	PF    *float64 `json:"pf,omitempty"`
	PI    *int64   `json:"pi,omitempty"`
	PIStr *int64   `json:"pistr,omitempty,string"`
	PStr  *string  `json:"pstr,omitempty"`

	// Other types
	Int64s        googleapi.Int64s         `json:"i64s,omitempty"`
	S             []int                    `json:"s,omitempty"`
	M             map[string]string        `json:"m,omitempty"`
	Any           interface{}              `json:"any,omitempty"`
	Child         *child                   `json:"child,omitempty"`
	MapToAnyArray map[string][]interface{} `json:"maptoanyarray,omitempty"`

	// ForceSendFields lists field names to emit even when empty;
	// NullFields lists field names (or "Field.key" map keys) to emit as
	// JSON null.
	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}
|
||||
|
||||
// child is a nested struct used to test marshaling of struct-valued fields.
type child struct {
	B bool `json:"childbool,omitempty"`
}

// testCase pairs a schema value with the exact JSON that MarshalJSON is
// expected to produce for it.
type testCase struct {
	s    schema
	want string
}
|
||||
|
||||
// TestBasics covers the basic-typed and pointer-typed fields of schema:
// default omission of empty values, ForceSendFields emitting zero values,
// and NullFields emitting explicit nulls.
func TestBasics(t *testing.T) {
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			// Forcing empty fields emits their zero values; nil pointers
			// stay omitted even when forced.
			s: schema{
				ForceSendFields: []string{"B", "F", "I", "Istr", "Str", "PB", "PF", "PI", "PIStr", "PStr"},
			},
			want: `{"b":false,"f":0.0,"i":0,"istr":"0","str":""}`,
		},
		{
			s: schema{
				NullFields: []string{"B", "F", "I", "Istr", "Str", "PB", "PF", "PI", "PIStr", "PStr"},
			},
			want: `{"b":null,"f":null,"i":null,"istr":null,"str":null,"pb":null,"pf":null,"pi":null,"pistr":null,"pstr":null}`,
		},
		{
			// Non-empty values are always emitted.
			s: schema{
				B:     true,
				F:     1.2,
				I:     1,
				Istr:  2,
				Str:   "a",
				PB:    googleapi.Bool(true),
				PF:    googleapi.Float64(1.2),
				PI:    googleapi.Int64(int64(1)),
				PIStr: googleapi.Int64(int64(2)),
				PStr:  googleapi.String("a"),
			},
			want: `{"b":true,"f":1.2,"i":1,"istr":"2","str":"a","pb":true,"pf":1.2,"pi":1,"pistr":"2","pstr":"a"}`,
		},
		{
			// Pointers to zero values are emitted; bare zero values are not.
			s: schema{
				B:     false,
				F:     0.0,
				I:     0,
				Istr:  0,
				Str:   "",
				PB:    googleapi.Bool(false),
				PF:    googleapi.Float64(0.0),
				PI:    googleapi.Int64(int64(0)),
				PIStr: googleapi.Int64(int64(0)),
				PStr:  googleapi.String(""),
			},
			want: `{"pb":false,"pf":0.0,"pi":0,"pistr":"0","pstr":""}`,
		},
		{
			s: schema{
				B:               false,
				F:               0.0,
				I:               0,
				Istr:            0,
				Str:             "",
				PB:              googleapi.Bool(false),
				PF:              googleapi.Float64(0.0),
				PI:              googleapi.Int64(int64(0)),
				PIStr:           googleapi.Int64(int64(0)),
				PStr:            googleapi.String(""),
				ForceSendFields: []string{"B", "F", "I", "Istr", "Str", "PB", "PF", "PI", "PIStr", "PStr"},
			},
			want: `{"b":false,"f":0.0,"i":0,"istr":"0","str":"","pb":false,"pf":0.0,"pi":0,"pistr":"0","pstr":""}`,
		},
		{
			s: schema{
				B:          false,
				F:          0.0,
				I:          0,
				Istr:       0,
				Str:        "",
				PB:         googleapi.Bool(false),
				PF:         googleapi.Float64(0.0),
				PI:         googleapi.Int64(int64(0)),
				PIStr:      googleapi.Int64(int64(0)),
				PStr:       googleapi.String(""),
				NullFields: []string{"B", "F", "I", "Istr", "Str"},
			},
			want: `{"b":null,"f":null,"i":null,"istr":null,"str":null,"pb":false,"pf":0.0,"pi":0,"pistr":"0","pstr":""}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
|
||||
|
||||
// TestSliceFields covers slice-typed fields: empty and nil slices are
// omitted by default, ForceSendFields emits them as [], and NullFields
// emits them as null.
func TestSliceFields(t *testing.T) {
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			s:    schema{S: []int{}, Int64s: googleapi.Int64s{}},
			want: `{}`,
		},
		{
			s:    schema{S: []int{1}, Int64s: googleapi.Int64s{1}},
			want: `{"s":[1],"i64s":["1"]}`,
		},
		{
			// Forcing nil slices emits empty arrays.
			s: schema{
				ForceSendFields: []string{"S", "Int64s"},
			},
			want: `{"s":[],"i64s":[]}`,
		},
		{
			s: schema{
				S:               []int{},
				Int64s:          googleapi.Int64s{},
				ForceSendFields: []string{"S", "Int64s"},
			},
			want: `{"s":[],"i64s":[]}`,
		},
		{
			s: schema{
				S:               []int{1},
				Int64s:          googleapi.Int64s{1},
				ForceSendFields: []string{"S", "Int64s"},
			},
			want: `{"s":[1],"i64s":["1"]}`,
		},
		{
			s: schema{
				NullFields: []string{"S", "Int64s"},
			},
			want: `{"s":null,"i64s":null}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
|
||||
|
||||
// TestMapField verifies MarshalJSON's handling of a map-typed field,
// including the dotted "M.key" NullFields syntax that nulls individual
// map entries rather than the whole map.
func TestMapField(t *testing.T) {
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			// A non-nil but empty map is omitted by default.
			s:    schema{M: make(map[string]string)},
			want: `{}`,
		},
		{
			s:    schema{M: map[string]string{"a": "b"}},
			want: `{"m":{"a":"b"}}`,
		},
		{
			// ForceSendFields emits a nil map as {}.
			s: schema{
				ForceSendFields: []string{"M"},
			},
			want: `{"m":{}}`,
		},
		{
			s: schema{
				NullFields: []string{"M"},
			},
			want: `{"m":null}`,
		},
		{
			s: schema{
				M:               make(map[string]string),
				ForceSendFields: []string{"M"},
			},
			want: `{"m":{}}`,
		},
		{
			// NullFields on the whole map wins over the (empty) map value.
			s: schema{
				M:          make(map[string]string),
				NullFields: []string{"M"},
			},
			want: `{"m":null}`,
		},
		{
			s: schema{
				M:               map[string]string{"a": "b"},
				ForceSendFields: []string{"M"},
			},
			want: `{"m":{"a":"b"}}`,
		},
		{
			// "M.a" nulls the existing entry; "M." adds a null entry with an
			// empty-string key.
			s: schema{
				M:          map[string]string{"a": "b"},
				NullFields: []string{"M.a", "M."},
			},
			want: `{"m": {"a": null, "":null}}`,
		},
		{
			// "M.c" adds a null entry for a key absent from the map.
			s: schema{
				M:          map[string]string{"a": "b"},
				NullFields: []string{"M.c"},
			},
			want: `{"m": {"a": "b", "c": null}}`,
		},
		{
			// Per-key NullFields on a nil map requires ForceSendFields to make
			// the map itself appear.
			s: schema{
				NullFields:      []string{"M.a"},
				ForceSendFields: []string{"M"},
			},
			want: `{"m": {"a": null}}`,
		},
		{
			// Without ForceSendFields, per-key nulls on a nil map emit nothing.
			s: schema{
				NullFields: []string{"M.a"},
			},
			want: `{}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
|
||||
|
||||
// TestMapToAnyArray verifies MarshalJSON's handling of a field of type
// map[string][]interface{}: empty maps are omitted, nil values and nil
// elements are preserved as JSON nulls, and ForceSendFields/NullFields
// behave as for other map fields.
func TestMapToAnyArray(t *testing.T) {
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			s:    schema{MapToAnyArray: make(map[string][]interface{})},
			want: `{}`,
		},
		{
			// Heterogeneous slice elements are marshaled as-is.
			s: schema{
				MapToAnyArray: map[string][]interface{}{
					"a": []interface{}{2, "b"},
				},
			},
			want: `{"maptoanyarray":{"a":[2, "b"]}}`,
		},
		{
			// A nil slice value becomes a JSON null.
			s: schema{
				MapToAnyArray: map[string][]interface{}{
					"a": nil,
				},
			},
			want: `{"maptoanyarray":{"a": null}}`,
		},
		{
			// A nil element inside the slice becomes a JSON null element.
			s: schema{
				MapToAnyArray: map[string][]interface{}{
					"a": []interface{}{nil},
				},
			},
			want: `{"maptoanyarray":{"a":[null]}}`,
		},
		{
			s: schema{
				ForceSendFields: []string{"MapToAnyArray"},
			},
			want: `{"maptoanyarray":{}}`,
		},
		{
			s: schema{
				NullFields: []string{"MapToAnyArray"},
			},
			want: `{"maptoanyarray":null}`,
		},
		{
			s: schema{
				MapToAnyArray:   make(map[string][]interface{}),
				ForceSendFields: []string{"MapToAnyArray"},
			},
			want: `{"maptoanyarray":{}}`,
		},
		{
			s: schema{
				MapToAnyArray: map[string][]interface{}{
					"a": []interface{}{2, "b"},
				},
				ForceSendFields: []string{"MapToAnyArray"},
			},
			want: `{"maptoanyarray":{"a":[2, "b"]}}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
|
||||
|
||||
// anyType is a test stand-in for a value with a custom JSON marshaler.
// Its MarshalJSON output is fixed, letting the tests below observe that
// the custom marshaler is honored regardless of Field.
type anyType struct {
	Field int
}

// MarshalJSON always emits the same fixed JSON string.
func (a anyType) MarshalJSON() ([]byte, error) {
	return []byte(`"anyType value"`), nil
}
|
||||
|
||||
// TestAnyField verifies MarshalJSON's handling of an interface-typed field,
// including the classic Go subtlety that an interface holding a typed nil
// pointer is itself non-nil (and so gets sent, as JSON null).
func TestAnyField(t *testing.T) {
	// ForceSendFields has no effect on nil interfaces and interfaces that contain nil pointers.
	var nilAny *anyType
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			// A typed nil pointer in the interface is non-nil, so the field is
			// sent as null.
			s:    schema{Any: nilAny},
			want: `{"any": null}`,
		},
		{
			s:    schema{Any: &anyType{}},
			want: `{"any":"anyType value"}`,
		},
		{
			s:    schema{Any: anyType{}},
			want: `{"any":"anyType value"}`,
		},
		{
			// ForceSendFields cannot make a nil interface appear.
			s: schema{
				ForceSendFields: []string{"Any"},
			},
			want: `{}`,
		},
		{
			s: schema{
				NullFields: []string{"Any"},
			},
			want: `{"any":null}`,
		},
		{
			s: schema{
				Any:             nilAny,
				ForceSendFields: []string{"Any"},
			},
			want: `{"any": null}`,
		},
		{
			s: schema{
				Any:             &anyType{},
				ForceSendFields: []string{"Any"},
			},
			want: `{"any":"anyType value"}`,
		},
		{
			s: schema{
				Any:             anyType{},
				ForceSendFields: []string{"Any"},
			},
			want: `{"any":"anyType value"}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
|
||||
|
||||
// TestSubschema verifies MarshalJSON's handling of a nested-schema pointer
// field: nil children are omitted (ForceSendFields cannot resurrect them),
// NullFields emits null, and non-nil children marshal recursively.
func TestSubschema(t *testing.T) {
	// Subschemas are always stored as pointers, so ForceSendFields has no effect on them.
	for _, tc := range []testCase{
		{
			s:    schema{},
			want: `{}`,
		},
		{
			s: schema{
				ForceSendFields: []string{"Child"},
			},
			want: `{}`,
		},
		{
			s: schema{
				NullFields: []string{"Child"},
			},
			want: `{"child":null}`,
		},
		{
			s:    schema{Child: &child{}},
			want: `{"child":{}}`,
		},
		{
			s: schema{
				Child:           &child{},
				ForceSendFields: []string{"Child"},
			},
			want: `{"child":{}}`,
		},
		{
			s:    schema{Child: &child{B: true}},
			want: `{"child":{"childbool":true}}`,
		},

		{
			s: schema{
				Child:           &child{B: true},
				ForceSendFields: []string{"Child"},
			},
			want: `{"child":{"childbool":true}}`,
		},
	} {
		checkMarshalJSON(t, tc)
	}
}
|
||||
|
||||
// checkMarshalJSON verifies that calling schemaToMap on tc.s yields a result which is equivalent to tc.want.
|
||||
func checkMarshalJSON(t *testing.T, tc testCase) {
|
||||
doCheckMarshalJSON(t, tc.s, tc.s.ForceSendFields, tc.s.NullFields, tc.want)
|
||||
if len(tc.s.ForceSendFields) == 0 && len(tc.s.NullFields) == 0 {
|
||||
// verify that the code path used when ForceSendFields and NullFields
|
||||
// are non-empty produces the same output as the fast path that is used
|
||||
// when they are empty.
|
||||
doCheckMarshalJSON(t, tc.s, []string{"dummy"}, []string{"dummy"}, tc.want)
|
||||
}
|
||||
}
|
||||
|
||||
func doCheckMarshalJSON(t *testing.T, s schema, forceSendFields, nullFields []string, wantJSON string) {
|
||||
encoded, err := MarshalJSON(s, forceSendFields, nullFields)
|
||||
if err != nil {
|
||||
t.Fatalf("encoding json:\n got err: %v", err)
|
||||
}
|
||||
|
||||
// The expected and obtained JSON can differ in field ordering, so unmarshal before comparing.
|
||||
var got interface{}
|
||||
var want interface{}
|
||||
err = json.Unmarshal(encoded, &got)
|
||||
if err != nil {
|
||||
t.Fatalf("decoding json:\n got err: %v", err)
|
||||
}
|
||||
err = json.Unmarshal([]byte(wantJSON), &want)
|
||||
if err != nil {
|
||||
t.Fatalf("decoding json:\n got err: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("schemaToMap:\ngot :%v\nwant: %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseJSONTag(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
tag string
|
||||
want jsonTag
|
||||
}{
|
||||
{
|
||||
tag: "-",
|
||||
want: jsonTag{ignore: true},
|
||||
}, {
|
||||
tag: "name,omitempty",
|
||||
want: jsonTag{apiName: "name"},
|
||||
}, {
|
||||
tag: "name,omitempty,string",
|
||||
want: jsonTag{apiName: "name", stringFormat: true},
|
||||
},
|
||||
} {
|
||||
got, err := parseJSONTag(tc.tag)
|
||||
if err != nil {
|
||||
t.Fatalf("parsing json:\n got err: %v\ntag: %q", err, tc.tag)
|
||||
}
|
||||
if !reflect.DeepEqual(got, tc.want) {
|
||||
t.Errorf("parseJSONTage:\ngot :%v\nwant:%v", got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
// TestParseMalformedJSONTag verifies that parseJSONTag rejects tags that are
// missing a name, missing the omitempty option, or carry unknown options.
func TestParseMalformedJSONTag(t *testing.T) {
	for _, tag := range []string{
		"",
		"name",
		"name,",
		"name,blah",
		"name,blah,string",
		",omitempty",
		",omitempty,string",
		"name,omitempty,string,blah",
	} {
		_, err := parseJSONTag(tag)
		if err == nil {
			t.Fatalf("parsing json: expected err, got nil for tag: %v", tag)
		}
	}
}
|
57
vendor/google.golang.org/api/gensupport/jsonfloat.go
generated
vendored
Normal file
57
vendor/google.golang.org/api/gensupport/jsonfloat.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
// JSONFloat64 is a float64 that supports proper unmarshaling of special float
|
||||
// values in JSON, according to
|
||||
// https://developers.google.com/protocol-buffers/docs/proto3#json. Although
|
||||
// that is a proto-to-JSON spec, it applies to all Google APIs.
|
||||
//
|
||||
// The jsonpb package
|
||||
// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has
|
||||
// similar functionality, but only for direct translation from proto messages
|
||||
// to JSON.
|
||||
type JSONFloat64 float64
|
||||
|
||||
func (f *JSONFloat64) UnmarshalJSON(data []byte) error {
|
||||
var ff float64
|
||||
if err := json.Unmarshal(data, &ff); err == nil {
|
||||
*f = JSONFloat64(ff)
|
||||
return nil
|
||||
}
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err == nil {
|
||||
switch s {
|
||||
case "NaN":
|
||||
ff = math.NaN()
|
||||
case "Infinity":
|
||||
ff = math.Inf(1)
|
||||
case "-Infinity":
|
||||
ff = math.Inf(-1)
|
||||
default:
|
||||
return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s)
|
||||
}
|
||||
*f = JSONFloat64(ff)
|
||||
return nil
|
||||
}
|
||||
return errors.New("google.golang.org/api/internal: data not float or string")
|
||||
}
|
53
vendor/google.golang.org/api/gensupport/jsonfloat_test.go
generated
vendored
Normal file
53
vendor/google.golang.org/api/gensupport/jsonfloat_test.go
generated
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"math"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestJSONFloat(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
want float64
|
||||
}{
|
||||
{"0", 0},
|
||||
{"-10", -10},
|
||||
{"1e23", 1e23},
|
||||
{`"Infinity"`, math.Inf(1)},
|
||||
{`"-Infinity"`, math.Inf(-1)},
|
||||
{`"NaN"`, math.NaN()},
|
||||
} {
|
||||
var f64 JSONFloat64
|
||||
if err := json.Unmarshal([]byte(test.in), &f64); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got := float64(f64)
|
||||
if got != test.want && math.IsNaN(got) != math.IsNaN(test.want) {
|
||||
t.Errorf("%s: got %f, want %f", test.in, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestJSONFloatErrors verifies that inputs which are neither JSON numbers nor
// one of the exact strings "NaN"/"Infinity"/"-Infinity" are rejected.
func TestJSONFloatErrors(t *testing.T) {
	var f64 JSONFloat64
	for _, in := range []string{"", "a", `"Inf"`, `"-Inf"`, `"nan"`, `"nana"`} {
		if err := json.Unmarshal([]byte(in), &f64); err == nil {
			t.Errorf("%q: got nil, want error", in)
		}
	}
}
|
336
vendor/google.golang.org/api/gensupport/media.go
generated
vendored
Normal file
336
vendor/google.golang.org/api/gensupport/media.go
generated
vendored
Normal file
@@ -0,0 +1,336 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// sniffBuffSize is the number of leading bytes buffered for content-type
// detection; http.DetectContentType considers at most the first 512 bytes.
const sniffBuffSize = 512
|
||||
// newContentSniffer wraps r so that its first sniffBuffSize bytes can be used
// for content-type detection and are later replayed to readers.
func newContentSniffer(r io.Reader) *contentSniffer {
	return &contentSniffer{r: r}
}
|
||||
|
||||
// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
// The sniffed bytes are buffered in start and returned before any further
// reads from the wrapped reader.
type contentSniffer struct {
	r     io.Reader
	start []byte // buffer for the sniffed bytes.
	err   error  // set to any error encountered while reading bytes to be sniffed.

	ctype   string // set on first sniff.
	sniffed bool   // set to true on first sniff.
}
|
||||
|
||||
// Read first drains any bytes buffered during sniffing, then reports the
// sniff-time error (if any), and finally delegates to the wrapped reader.
func (cs *contentSniffer) Read(p []byte) (n int, err error) {
	// Ensure that the content type is sniffed before any data is consumed from Reader.
	_, _ = cs.ContentType()

	if len(cs.start) > 0 {
		n := copy(p, cs.start)
		cs.start = cs.start[n:]
		return n, nil
	}

	// We may have read some bytes into start while sniffing, even if the read ended in an error.
	// We should first return those bytes, then the error.
	if cs.err != nil {
		return 0, cs.err
	}

	// Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
	return cs.r.Read(p)
}
|
||||
|
||||
// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
// The sniff happens at most once; subsequent calls return the cached result.
func (cs *contentSniffer) ContentType() (string, bool) {
	if cs.sniffed {
		return cs.ctype, cs.ctype != ""
	}
	cs.sniffed = true
	// If ReadAll hits EOF, it returns err==nil.
	cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))

	// Don't try to detect the content type based on possibly incomplete data.
	if cs.err != nil {
		return "", false
	}

	cs.ctype = http.DetectContentType(cs.start)
	return cs.ctype, true
}
|
||||
|
||||
// DetermineContentType determines the content type of the supplied reader.
// If the content type is already known, it can be specified via ctype.
// Otherwise, the content of media will be sniffed to determine the content type.
// If media implements googleapi.ContentTyper (deprecated), this will be used
// instead of sniffing the content.
// After calling DetermineContentType the caller must not perform further reads on
// media, but rather read from the Reader that is returned.
func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
	// Note: callers could avoid calling DetermineContentType if ctype != "",
	// but doing the check inside this function reduces the amount of
	// generated code.
	if ctype != "" {
		return media, ctype
	}

	// For backwards compatibility, allow clients to set content
	// type by providing a ContentTyper for media.
	if typer, ok := media.(googleapi.ContentTyper); ok {
		return media, typer.ContentType()
	}

	sniffer := newContentSniffer(media)
	if ctype, ok := sniffer.ContentType(); ok {
		return sniffer, ctype
	}
	// If content type could not be sniffed, reads from sniffer will eventually fail with an error.
	return sniffer, ""
}
|
||||
|
||||
// typeReader pairs a reader with the content type of the data it yields;
// used to describe one part of a multipart/related body.
type typeReader struct {
	io.Reader
	typ string
}
|
||||
|
||||
// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body.
// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
type multipartReader struct {
	pr    *io.PipeReader // read end of the pipe fed by the writer goroutine.
	ctype string         // full "multipart/related; boundary=..." content type.
	// mu guards pipeOpen so Close is safe to call more than once.
	mu       sync.Mutex
	pipeOpen bool
}
|
||||
|
||||
// newMultipartReader assembles the given parts into a streamed
// multipart/related body: a goroutine writes each part through a
// multipart.Writer into a pipe, and any part error is delivered to readers
// via CloseWithError.
func newMultipartReader(parts []typeReader) *multipartReader {
	mp := &multipartReader{pipeOpen: true}
	var pw *io.PipeWriter
	mp.pr, pw = io.Pipe()
	mpw := multipart.NewWriter(pw)
	mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
	go func() {
		for _, part := range parts {
			w, err := mpw.CreatePart(typeHeader(part.typ))
			if err != nil {
				mpw.Close()
				pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
				return
			}
			_, err = io.Copy(w, part.Reader)
			if err != nil {
				mpw.Close()
				pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
				return
			}
		}

		// Finish the multipart body (writes the trailing boundary), then
		// signal EOF to readers.
		mpw.Close()
		pw.Close()
	}()
	return mp
}
|
||||
|
||||
// Read reads from the pipe that the part-writing goroutine feeds.
func (mp *multipartReader) Read(data []byte) (n int, err error) {
	return mp.pr.Read(data)
}
|
||||
|
||||
func (mp *multipartReader) Close() error {
|
||||
mp.mu.Lock()
|
||||
if !mp.pipeOpen {
|
||||
mp.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
mp.pipeOpen = false
|
||||
mp.mu.Unlock()
|
||||
return mp.pr.Close()
|
||||
}
|
||||
|
||||
// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body.
// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary.
//
// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF.
func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) {
	// The JSON body always comes first, followed by the media part.
	mp := newMultipartReader([]typeReader{
		{body, bodyContentType},
		{media, mediaContentType},
	})
	return mp, mp.ctype
}
|
||||
|
||||
// typeHeader builds the MIME header for one part of a multipart body. An
// empty contentType yields a header without a Content-Type entry.
func typeHeader(contentType string) textproto.MIMEHeader {
	hdr := textproto.MIMEHeader{}
	if contentType == "" {
		return hdr
	}
	hdr.Set("Content-Type", contentType)
	return hdr
}
|
||||
|
||||
// PrepareUpload determines whether the data in the supplied reader should be
// uploaded in a single request, or in sequential chunks.
// chunkSize is the size of the chunk that media should be split into.
//
// If chunkSize is zero, media is returned as the first value, and the other
// two return values are nil, true.
//
// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the
// contents of media fit in a single chunk.
//
// After PrepareUpload has been called, media should no longer be used: the
// media content should be accessed via one of the return values.
func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) {
	if chunkSize == 0 { // do not chunk
		return media, nil, true
	}
	mb = NewMediaBuffer(media, chunkSize)
	// Eagerly read the first chunk to learn whether all of media fits in one.
	_, _, _, err := mb.Chunk()
	// If err is io.EOF, we can upload this in a single request. Otherwise, err is
	// either nil or a non-EOF error. If it is the latter, then the next call to
	// mb.Chunk will return the same error. Returning a MediaBuffer ensures that this
	// error will be handled at some point.
	return nil, mb, err == io.EOF
}
|
||||
|
||||
// MediaInfo holds information for media uploads. It is intended for use by generated
// code only.
type MediaInfo struct {
	// At most one of Media and MediaBuffer will be set.
	media       io.Reader    // unchunked media; set only when chunking is turned off.
	buffer      *MediaBuffer // chunked media.
	singleChunk bool         // whether the media fits in a single chunk.
	mType       string       // resolved media content type.
	size        int64        // mediaSize, if known. Used only for calls to progressUpdater_.
	progressUpdater googleapi.ProgressUpdater
}
|
||||
|
||||
// NewInfoFromMedia should be invoked from the Media method of a call. It returns a
// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer
// if needed.
func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo {
	mi := &MediaInfo{}
	opts := googleapi.ProcessMediaOptions(options)
	if !opts.ForceEmptyContentType {
		// Resolve the content type (explicit option, ContentTyper, or sniff)
		// unless the caller explicitly asked for it to be left empty.
		r, mi.mType = DetermineContentType(r, opts.ContentType)
	}
	mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize)
	return mi
}
|
||||
|
||||
// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a
// call. It returns a MediaInfo using the given reader, size and media type.
func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo {
	rdr := ReaderAtToReader(r, size)
	rdr, mType := DetermineContentType(rdr, mediaType)
	// Resumable media is always buffered with the default chunk size and
	// never treated as a single chunk.
	return &MediaInfo{
		size:        size,
		mType:       mType,
		buffer:      NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize),
		media:       nil,
		singleChunk: false,
	}
}
|
||||
|
||||
// SetProgressUpdater records pu as the callback to be invoked with upload
// progress. Calling it on a nil receiver is a no-op.
func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) {
	if mi != nil {
		mi.progressUpdater = pu
	}
}
|
||||
|
||||
// UploadType determines the type of upload: a single request, or a resumable
|
||||
// series of requests.
|
||||
func (mi *MediaInfo) UploadType() string {
|
||||
if mi.singleChunk {
|
||||
return "multipart"
|
||||
}
|
||||
return "resumable"
|
||||
}
|
||||
|
||||
// UploadRequest sets up an HTTP request for media upload. It adds headers
// as necessary, and returns a replacement for the body and a function for http.Request.GetBody.
func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) {
	cleanup = func() {}
	if mi == nil {
		return body, nil, cleanup
	}
	var media io.Reader
	if mi.media != nil {
		// This only happens when the caller has turned off chunking. In that
		// case, we write all of media in a single non-retryable request.
		media = mi.media
	} else if mi.singleChunk {
		// The data fits in a single chunk, which has now been read into the MediaBuffer.
		// We obtain that chunk so we can write it in a single request. The request can
		// be retried because the data is stored in the MediaBuffer.
		media, _, _, _ = mi.buffer.Chunk()
	}
	if media != nil {
		fb := readerFunc(body)
		fm := readerFunc(media)
		combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType)
		if fb != nil && fm != nil {
			// Both body and media can be re-materialized without consuming
			// them, so the request can be replayed via GetBody.
			getBody = func() (io.ReadCloser, error) {
				rb := ioutil.NopCloser(fb())
				rm := ioutil.NopCloser(fm())
				r, _ := CombineBodyMedia(rb, "application/json", rm, mi.mType)
				return r, nil
			}
		}
		cleanup = func() { combined.Close() }
		reqHeaders.Set("Content-Type", ctype)
		body = combined
	}
	if mi.buffer != nil && mi.mType != "" && !mi.singleChunk {
		// Resumable upload: announce the media type up front.
		reqHeaders.Set("X-Upload-Content-Type", mi.mType)
	}
	return body, getBody, cleanup
}
|
||||
|
||||
// readerFunc returns a function that always returns an io.Reader that has the same
|
||||
// contents as r, provided that can be done without consuming r. Otherwise, it
|
||||
// returns nil.
|
||||
// See http.NewRequest (in net/http/request.go).
|
||||
func readerFunc(r io.Reader) func() io.Reader {
|
||||
switch r := r.(type) {
|
||||
case *bytes.Buffer:
|
||||
buf := r.Bytes()
|
||||
return func() io.Reader { return bytes.NewReader(buf) }
|
||||
case *bytes.Reader:
|
||||
snapshot := *r
|
||||
return func() io.Reader { r := snapshot; return &r }
|
||||
case *strings.Reader:
|
||||
snapshot := *r
|
||||
return func() io.Reader { r := snapshot; return &r }
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// ResumableUpload returns an appropriately configured ResumableUpload value if the
// upload is resumable, or nil otherwise.
func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload {
	if mi == nil || mi.singleChunk {
		return nil
	}
	return &ResumableUpload{
		URI:       locURI,
		Media:     mi.buffer,
		MediaType: mi.mType,
		// The callback closes over mi so it sees the progress updater and
		// size current at call time.
		Callback: func(curr int64) {
			if mi.progressUpdater != nil {
				mi.progressUpdater(curr, mi.size)
			}
		},
	}
}
|
407
vendor/google.golang.org/api/gensupport/media_test.go
generated
vendored
Normal file
407
vendor/google.golang.org/api/gensupport/media_test.go
generated
vendored
Normal file
@@ -0,0 +1,407 @@
|
||||
// Copyright 2015 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// TestContentSniffing checks that contentSniffer both detects the content
// type from the first 512 bytes and still yields the full data (and any
// final error) to readers; detection is reported as failed when an error
// occurs within the sniffed prefix.
func TestContentSniffing(t *testing.T) {
	type testCase struct {
		data     []byte // the data to read from the Reader
		finalErr error  // error to return after data has been read

		wantContentType       string
		wantContentTypeResult bool
	}

	for _, tc := range []testCase{
		{
			data:                  []byte{0, 0, 0, 0},
			finalErr:              nil,
			wantContentType:       "application/octet-stream",
			wantContentTypeResult: true,
		},
		{
			data:                  []byte(""),
			finalErr:              nil,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: true,
		},
		{
			data:                  []byte(""),
			finalErr:              io.ErrUnexpectedEOF,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: false,
		},
		{
			data:                  []byte("abc"),
			finalErr:              nil,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: true,
		},
		{
			data:                  []byte("abc"),
			finalErr:              io.ErrUnexpectedEOF,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: false,
		},
		// The following examples contain more bytes than are buffered for sniffing.
		{
			data:                  bytes.Repeat([]byte("a"), 513),
			finalErr:              nil,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: true,
		},
		{
			data:                  bytes.Repeat([]byte("a"), 513),
			finalErr:              io.ErrUnexpectedEOF,
			wantContentType:       "text/plain; charset=utf-8",
			wantContentTypeResult: true, // true because error is after first 512 bytes.
		},
	} {
		er := &errReader{buf: tc.data, err: tc.finalErr}

		sct := newContentSniffer(er)

		// Even if was an error during the first 512 bytes, we should still be able to read those bytes.
		buf, err := ioutil.ReadAll(sct)

		if !reflect.DeepEqual(buf, tc.data) {
			t.Fatalf("Failed reading buffer: got: %q; want:%q", buf, tc.data)
		}

		if err != tc.finalErr {
			t.Fatalf("Reading buffer error: got: %v; want: %v", err, tc.finalErr)
		}

		ct, ok := sct.ContentType()
		if ok != tc.wantContentTypeResult {
			t.Fatalf("Content type result got: %v; want: %v", ok, tc.wantContentTypeResult)
		}
		if ok && ct != tc.wantContentType {
			t.Fatalf("Content type got: %q; want: %q", ct, tc.wantContentType)
		}
	}
}
|
||||
|
||||
// staticContentTyper wraps a reader and reports a fixed content type,
// letting tests confirm that DetermineContentType honors the (deprecated)
// googleapi.ContentTyper interface instead of sniffing.
type staticContentTyper struct {
	io.Reader
}

// ContentType always returns the same fixed value.
func (sct staticContentTyper) ContentType() string {
	return "static content type"
}
|
||||
|
||||
func TestDetermineContentType(t *testing.T) {
|
||||
data := []byte("abc")
|
||||
rdr := func() io.Reader {
|
||||
return bytes.NewBuffer(data)
|
||||
}
|
||||
|
||||
type testCase struct {
|
||||
r io.Reader
|
||||
explicitConentType string
|
||||
wantContentType string
|
||||
}
|
||||
|
||||
for _, tc := range []testCase{
|
||||
{
|
||||
r: rdr(),
|
||||
wantContentType: "text/plain; charset=utf-8",
|
||||
},
|
||||
{
|
||||
r: staticContentTyper{rdr()},
|
||||
wantContentType: "static content type",
|
||||
},
|
||||
{
|
||||
r: staticContentTyper{rdr()},
|
||||
explicitConentType: "explicit",
|
||||
wantContentType: "explicit",
|
||||
},
|
||||
} {
|
||||
r, ctype := DetermineContentType(tc.r, tc.explicitConentType)
|
||||
got, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed reading buffer: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(got, data) {
|
||||
t.Fatalf("Failed reading buffer: got: %q; want:%q", got, data)
|
||||
}
|
||||
|
||||
if ctype != tc.wantContentType {
|
||||
t.Fatalf("Content type got: %q; want: %q", ctype, tc.wantContentType)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestNewInfoFromMedia verifies, for various readers and media options,
// which of MediaInfo.media / MediaInfo.buffer is populated, the resolved
// content type, and whether the upload fits in a single chunk.
func TestNewInfoFromMedia(t *testing.T) {
	const textType = "text/plain; charset=utf-8"
	for _, test := range []struct {
		desc                                   string
		r                                      io.Reader
		opts                                   []googleapi.MediaOption
		wantType                               string
		wantMedia, wantBuffer, wantSingleChunk bool
	}{
		{
			desc:            "an empty reader results in a MediaBuffer with a single, empty chunk",
			r:               new(bytes.Buffer),
			opts:            nil,
			wantType:        textType,
			wantBuffer:      true,
			wantSingleChunk: true,
		},
		{
			desc:            "ContentType is observed",
			r:               new(bytes.Buffer),
			opts:            []googleapi.MediaOption{googleapi.ContentType("xyz")},
			wantType:        "xyz",
			wantBuffer:      true,
			wantSingleChunk: true,
		},
		{
			desc:            "chunk size of zero: don't use a MediaBuffer; upload as a single chunk",
			r:               strings.NewReader("12345"),
			opts:            []googleapi.MediaOption{googleapi.ChunkSize(0)},
			wantType:        textType,
			wantMedia:       true,
			wantSingleChunk: true,
		},
		{
			desc:            "chunk size > data size: MediaBuffer with single chunk",
			r:               strings.NewReader("12345"),
			opts:            []googleapi.MediaOption{googleapi.ChunkSize(100)},
			wantType:        textType,
			wantBuffer:      true,
			wantSingleChunk: true,
		},
		{
			desc:            "chunk size == data size: MediaBuffer with single chunk",
			r:               &nullReader{googleapi.MinUploadChunkSize},
			opts:            []googleapi.MediaOption{googleapi.ChunkSize(1)},
			wantType:        "application/octet-stream",
			wantBuffer:      true,
			wantSingleChunk: true,
		},
		{
			desc: "chunk size < data size: MediaBuffer, not single chunk",
			// Note that ChunkSize = 1 is rounded up to googleapi.MinUploadChunkSize.
			r:               &nullReader{2 * googleapi.MinUploadChunkSize},
			opts:            []googleapi.MediaOption{googleapi.ChunkSize(1)},
			wantType:        "application/octet-stream",
			wantBuffer:      true,
			wantSingleChunk: false,
		},
	} {

		mi := NewInfoFromMedia(test.r, test.opts)
		if got, want := mi.mType, test.wantType; got != want {
			t.Errorf("%s: type: got %q, want %q", test.desc, got, want)
		}
		if got, want := (mi.media != nil), test.wantMedia; got != want {
			t.Errorf("%s: media non-nil: got %t, want %t", test.desc, got, want)
		}
		if got, want := (mi.buffer != nil), test.wantBuffer; got != want {
			t.Errorf("%s: buffer non-nil: got %t, want %t", test.desc, got, want)
		}
		if got, want := mi.singleChunk, test.wantSingleChunk; got != want {
			t.Errorf("%s: singleChunk: got %t, want %t", test.desc, got, want)
		}
	}
}
|
||||
|
||||
// TestUploadRequest verifies the Content-Type and X-Upload-Content-Type
// headers produced by UploadRequest for various chunk-size configurations.
func TestUploadRequest(t *testing.T) {
	for _, test := range []struct {
		desc            string
		r               io.Reader
		chunkSize       int
		wantContentType string
		wantUploadType  string
	}{
		{
			desc:            "chunk size of zero: don't use a MediaBuffer; upload as a single chunk",
			r:               strings.NewReader("12345"),
			chunkSize:       0,
			wantContentType: "multipart/related;",
		},
		{
			desc:            "chunk size > data size: MediaBuffer with single chunk",
			r:               strings.NewReader("12345"),
			chunkSize:       100,
			wantContentType: "multipart/related;",
		},
		{
			desc:            "chunk size == data size: MediaBuffer with single chunk",
			r:               &nullReader{googleapi.MinUploadChunkSize},
			chunkSize:       1,
			wantContentType: "multipart/related;",
		},
		{
			desc: "chunk size < data size: MediaBuffer, not single chunk",
			// Note that ChunkSize = 1 is rounded up to googleapi.MinUploadChunkSize.
			r:              &nullReader{2 * googleapi.MinUploadChunkSize},
			chunkSize:      1,
			wantUploadType: "application/octet-stream",
		},
	} {
		mi := NewInfoFromMedia(test.r, []googleapi.MediaOption{googleapi.ChunkSize(test.chunkSize)})
		h := http.Header{}
		mi.UploadRequest(h, new(bytes.Buffer))
		// The multipart Content-Type carries a random boundary, so only the
		// prefix can be compared.
		if got, want := h.Get("Content-Type"), test.wantContentType; !strings.HasPrefix(got, want) {
			t.Errorf("%s: Content-Type: got %q, want prefix %q", test.desc, got, want)
		}
		if got, want := h.Get("X-Upload-Content-Type"), test.wantUploadType; got != want {
			t.Errorf("%s: X-Upload-Content-Type: got %q, want %q", test.desc, got, want)
		}
	}
}
|
||||
|
||||
// TestUploadRequestGetBody verifies when UploadRequest returns a non-nil
// getBody function, and that getBody reproduces the original body content.
func TestUploadRequestGetBody(t *testing.T) {
	// Test that a single chunk results in a getBody function that is non-nil, and
	// that produces the same content as the original body.

	// Mock out rand.Reader so we use the same multipart boundary every time.
	rr := rand.Reader
	rand.Reader = &nullReader{1000}
	defer func() {
		rand.Reader = rr
	}()

	for _, test := range []struct {
		desc            string
		r               io.Reader
		chunkSize       int
		wantGetBody     bool
		wantContentType string
		wantUploadType  string
	}{
		{
			desc:        "chunk size of zero: no getBody",
			r:           &nullReader{10},
			chunkSize:   0,
			wantGetBody: false,
		},
		{
			desc:        "chunk size == data size: 1 chunk, getBody",
			r:           &nullReader{googleapi.MinUploadChunkSize},
			chunkSize:   1,
			wantGetBody: true,
		},
		{
			desc: "chunk size < data size: MediaBuffer, >1 chunk, no getBody",
			// No getBody here, because the initial request contains no media data
			// Note that ChunkSize = 1 is rounded up to googleapi.MinUploadChunkSize.
			r:           &nullReader{2 * googleapi.MinUploadChunkSize},
			chunkSize:   1,
			wantGetBody: false,
		},
	} {
		mi := NewInfoFromMedia(test.r, []googleapi.MediaOption{googleapi.ChunkSize(test.chunkSize)})
		r, getBody, _ := mi.UploadRequest(http.Header{}, bytes.NewBuffer([]byte("body")))
		if got, want := (getBody != nil), test.wantGetBody; got != want {
			t.Errorf("%s: getBody: got %t, want %t", test.desc, got, want)
			continue
		}
		if getBody == nil {
			continue
		}
		want, err := ioutil.ReadAll(r)
		if err != nil {
			t.Fatal(err)
		}
		// getBody must be re-invocable: every call yields a fresh body with
		// content identical to the original request body.
		for i := 0; i < 3; i++ {
			rc, err := getBody()
			if err != nil {
				t.Fatal(err)
			}
			got, err := ioutil.ReadAll(rc)
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, want) {
				t.Errorf("%s, %d:\ngot:\n%s\nwant:\n%s", test.desc, i, string(got), string(want))
			}
		}
	}
}
|
||||
|
||||
// TestResumableUpload checks which chunk-size configurations yield a
// "resumable" upload (with a non-nil ResumableUpload) versus a one-shot
// "multipart" upload.
func TestResumableUpload(t *testing.T) {
	for _, test := range []struct {
		desc                string
		r                   io.Reader
		chunkSize           int
		wantUploadType      string
		wantResumableUpload bool
	}{
		{
			desc:                "chunk size of zero: don't use a MediaBuffer; upload as a single chunk",
			r:                   strings.NewReader("12345"),
			chunkSize:           0,
			wantUploadType:      "multipart",
			wantResumableUpload: false,
		},
		{
			desc:                "chunk size > data size: MediaBuffer with single chunk",
			r:                   strings.NewReader("12345"),
			chunkSize:           100,
			wantUploadType:      "multipart",
			wantResumableUpload: false,
		},
		{
			desc: "chunk size == data size: MediaBuffer with single chunk",
			// (Because nullReader returns EOF with the last bytes.)
			r:                   &nullReader{googleapi.MinUploadChunkSize},
			chunkSize:           googleapi.MinUploadChunkSize,
			wantUploadType:      "multipart",
			wantResumableUpload: false,
		},
		{
			desc: "chunk size < data size: MediaBuffer, not single chunk",
			// Note that ChunkSize = 1 is rounded up to googleapi.MinUploadChunkSize.
			r:                   &nullReader{2 * googleapi.MinUploadChunkSize},
			chunkSize:           1,
			wantUploadType:      "resumable",
			wantResumableUpload: true,
		},
	} {
		mi := NewInfoFromMedia(test.r, []googleapi.MediaOption{googleapi.ChunkSize(test.chunkSize)})
		if got, want := mi.UploadType(), test.wantUploadType; got != want {
			t.Errorf("%s: upload type: got %q, want %q", test.desc, got, want)
		}
		if got, want := mi.ResumableUpload("") != nil, test.wantResumableUpload; got != want {
			t.Errorf("%s: resumable upload non-nil: got %t, want %t", test.desc, got, want)
		}
	}
}
|
||||
|
||||
// A nullReader pretends to produce a fixed number of bytes; it never writes
// into the caller's buffer, it only decrements its remaining count.
type nullReader struct {
	remain int
}

// Read reports up to len(buf) bytes as "read" without touching buf.
// Once the remaining count reaches zero it returns io.EOF (together with
// the final bytes, like a well-behaved Reader may).
func (r *nullReader) Read(buf []byte) (int, error) {
	n := r.remain
	if len(buf) < n {
		n = len(buf)
	}
	r.remain -= n
	if r.remain == 0 {
		return n, io.EOF
	}
	return n, nil
}
|
14
vendor/google.golang.org/api/gensupport/not_go18.go
generated
vendored
Normal file
14
vendor/google.golang.org/api/gensupport/not_go18.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.8
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// SetGetBody is a no-op on Go versions before 1.8, where http.Request has
// no GetBody field (see the "!go1.8" build tag above); the go1.8 variant
// of this file installs f so single-chunk uploads can be replayed.
func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {}
|
50
vendor/google.golang.org/api/gensupport/params.go
generated
vendored
Normal file
50
vendor/google.golang.org/api/gensupport/params.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// URLParams is a simplified stand-in for url.Values that safely accumulates
// URL query parameters before encoding.
type URLParams map[string][]string

// Get returns the first value stored under key, or "" when key is absent.
func (u URLParams) Get(key string) string {
	if vs, ok := u[key]; ok && len(vs) > 0 {
		return vs[0]
	}
	return ""
}

// Set stores value as the only value for key, discarding any previous ones.
func (u URLParams) Set(key, value string) {
	u.SetMulti(key, []string{value})
}

// SetMulti stores values as the value list for key, discarding any previous
// ones. The slice is retained as-is (not copied), so callers must not modify
// it afterwards; copy first if isolation is needed.
func (u URLParams) SetMulti(key string, values []string) {
	u[key] = values
}

// Encode renders the parameters in "URL encoded" form
// ("bar=baz&foo=quux"), sorted by key.
func (u URLParams) Encode() string {
	v := url.Values(u)
	return v.Encode()
}
|
||||
|
||||
// SetOptions applies each CallOption's key/value pair to u via Set, so a
// later option silently overwrites an earlier one with the same key.
func SetOptions(u URLParams, opts ...googleapi.CallOption) {
	for _, o := range opts {
		u.Set(o.Get())
	}
}
|
217
vendor/google.golang.org/api/gensupport/resumable.go
generated
vendored
Normal file
217
vendor/google.golang.org/api/gensupport/resumable.go
generated
vendored
Normal file
@@ -0,0 +1,217 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
	// statusTooManyRequests is returned by the storage API if the
	// per-project limits have been temporarily exceeded. The request
	// should be retried.
	// https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes
	statusTooManyRequests = 429
)

// ResumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type ResumableUpload struct {
	Client *http.Client
	// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
	URI       string
	UserAgent string // User-Agent for header of the request
	// Media is the object being uploaded.
	Media *MediaBuffer
	// MediaType defines the media type, e.g. "image/jpeg".
	MediaType string

	mu       sync.Mutex // guards progress
	progress int64      // number of bytes uploaded so far

	// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
	Callback func(int64)

	// Backoff is the strategy used to pause between retried chunk uploads.
	// If not specified, a default exponential backoff strategy will be used.
	Backoff BackoffStrategy
}
|
||||
|
||||
// Progress returns the number of bytes uploaded at this point.
|
||||
func (rx *ResumableUpload) Progress() int64 {
|
||||
rx.mu.Lock()
|
||||
defer rx.mu.Unlock()
|
||||
return rx.progress
|
||||
}
|
||||
|
||||
// doUploadRequest performs a single HTTP request to upload data.
// off specifies the offset in rx.Media from which data is drawn.
// size is the number of bytes in data.
// final specifies whether data is the final chunk to be uploaded.
func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) {
	req, err := http.NewRequest("POST", rx.URI, data)
	if err != nil {
		return nil, err
	}

	req.ContentLength = size
	var contentRange string
	if final {
		if size == 0 {
			// Zero-byte final request: "bytes */<total>" tells the server
			// only the total length, with no new data.
			contentRange = fmt.Sprintf("bytes */%v", off)
		} else {
			contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size)
		}
	} else {
		// Total size is unknown until the last chunk, hence the trailing "*".
		contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1)
	}
	req.Header.Set("Content-Range", contentRange)
	req.Header.Set("Content-Type", rx.MediaType)
	req.Header.Set("User-Agent", rx.UserAgent)

	// Google's upload endpoint uses status code 308 for a
	// different purpose than the "308 Permanent Redirect"
	// since-standardized in RFC 7238. Because of the conflict in
	// semantics, Google added this new request header which
	// causes it to not use "308" and instead reply with 200 OK
	// and sets the upload-specific "X-HTTP-Status-Code-Override:
	// 308" response header.
	req.Header.Set("X-GUploader-No-308", "yes")

	return SendRequest(ctx, rx.Client, req)
}
|
||||
|
||||
func statusResumeIncomplete(resp *http.Response) bool {
|
||||
// This is how the server signals "status resume incomplete"
|
||||
// when X-GUploader-No-308 is set to "yes":
|
||||
return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308"
|
||||
}
|
||||
|
||||
// reportProgress calls a user-supplied callback to report upload progress.
|
||||
// If old==updated, the callback is not called.
|
||||
func (rx *ResumableUpload) reportProgress(old, updated int64) {
|
||||
if updated-old == 0 {
|
||||
return
|
||||
}
|
||||
rx.mu.Lock()
|
||||
rx.progress = updated
|
||||
rx.mu.Unlock()
|
||||
if rx.Callback != nil {
|
||||
rx.Callback(updated)
|
||||
}
|
||||
}
|
||||
|
||||
// transferChunk performs a single HTTP request to upload a single chunk from rx.Media.
func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) {
	chunk, off, size, err := rx.Media.Chunk()

	// io.EOF from Chunk marks this as the final chunk; it is not a failure.
	done := err == io.EOF
	if !done && err != nil {
		return nil, err
	}

	res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done)
	if err != nil {
		return res, err
	}

	// We sent "X-GUploader-No-308: yes" (see comment elsewhere in
	// this file), so we don't expect to get a 308.
	if res.StatusCode == 308 {
		return nil, errors.New("unexpected 308 response status code")
	}

	if res.StatusCode == http.StatusOK {
		rx.reportProgress(off, off+int64(size))
	}

	// "Resume incomplete" means the server stored this chunk; advance the
	// buffer so the next call sends the following chunk.
	if statusResumeIncomplete(res) {
		rx.Media.Next()
	}
	return res, nil
}
|
||||
|
||||
func contextDone(ctx context.Context) bool {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Upload starts the process of a resumable upload with a cancellable context.
// It retries using the provided back off strategy until cancelled or the
// strategy indicates to stop retrying.
// It is called from the auto-generated API code and is not visible to the user.
// Before sending an HTTP request, Upload calls any registered hook functions,
// and calls the returned functions after the request returns (see send.go).
// rx is private to the auto-generated API code.
// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
	var pause time.Duration
	backoff := rx.Backoff
	if backoff == nil {
		backoff = DefaultBackoffStrategy()
	}

	for {
		// Ensure that we return in the case of cancelled context, even if pause is 0.
		if contextDone(ctx) {
			return nil, ctx.Err()
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(pause):
		}

		resp, err = rx.transferChunk(ctx)

		var status int
		if resp != nil {
			status = resp.StatusCode
		}

		// Check if we should retry the request.
		if shouldRetry(status, err) {
			var retry bool
			pause, retry = backoff.Pause()
			if retry {
				if resp != nil && resp.Body != nil {
					resp.Body.Close()
				}
				continue
			}
			// Backoff exhausted: fall through and return the failure below.
		}

		// If the chunk was uploaded successfully, but there's still
		// more to go, upload the next chunk without any delay.
		if statusResumeIncomplete(resp) {
			pause = 0
			backoff.Reset()
			resp.Body.Close()
			continue
		}

		// It's possible for err and resp to both be non-nil here, but we expose a simpler
		// contract to our callers: exactly one of resp and err will be non-nil. This means
		// that any response body must be closed here before returning a non-nil error.
		if err != nil {
			if resp != nil && resp.Body != nil {
				resp.Body.Close()
			}
			return nil, err
		}

		return resp, nil
	}
}
|
281
vendor/google.golang.org/api/gensupport/resumable_test.go
generated
vendored
Normal file
281
vendor/google.golang.org/api/gensupport/resumable_test.go
generated
vendored
Normal file
@@ -0,0 +1,281 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// unexpectedReader fails every Read, flagging that a test consumed a body
// it should never have touched.
type unexpectedReader struct{}

// Read always reports zero bytes and an error.
func (unexpectedReader) Read(_ []byte) (int, error) {
	return 0, fmt.Errorf("unexpected read in test")
}
|
||||
|
||||
// event is an expected request/response pair in a scripted transport.
type event struct {
	// byteRange is the Content-Range header value that should be present in a request.
	byteRange string
	// responseStatus is the http status code to send in response.
	responseStatus int
}
|
||||
|
||||
// interruptibleTransport is configured with a canned set of requests/responses.
// It records the incoming data, unless the corresponding event is configured to return
// http.StatusServiceUnavailable.
type interruptibleTransport struct {
	events []event     // remaining scripted request/response pairs, consumed in order
	buf    []byte      // accumulates bytes successfully "uploaded" so far
	bodies bodyTracker // response bodies handed out and not yet closed
}
|
||||
|
||||
// bodyTracker is the set of response bodies that are currently open
// (handed out but not yet closed).
type bodyTracker map[io.ReadCloser]struct{}

// Add records body as open.
func (bt bodyTracker) Add(body io.ReadCloser) {
	bt[body] = struct{}{}
}

// Close removes body from the open set.
func (bt bodyTracker) Close(body io.ReadCloser) {
	delete(bt, body)
}
|
||||
|
||||
// trackingCloser wraps a Reader and registers itself with a bodyTracker so
// tests can detect response bodies that are never closed.
type trackingCloser struct {
	io.Reader
	tracker bodyTracker
}

// Close marks tc as closed in its tracker. It never fails.
func (tc *trackingCloser) Close() error {
	tc.tracker.Close(tc)
	return nil
}

// Open marks tc as an open (unclosed) body in its tracker.
func (tc *trackingCloser) Open() {
	tc.tracker.Add(tc)
}
|
||||
|
||||
// RoundTrip consumes the next scripted event: it validates the request's
// Content-Range header, records the uploaded bytes (unless simulating a
// 503), and synthesizes the scripted response with a tracked body.
func (t *interruptibleTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	ev := t.events[0]
	t.events = t.events[1:]
	if got, want := req.Header.Get("Content-Range"), ev.byteRange; got != want {
		return nil, fmt.Errorf("byte range: got %s; want %s", got, want)
	}

	// A simulated 503 means the server "lost" this chunk, so its bytes
	// must not be recorded as transferred.
	if ev.responseStatus != http.StatusServiceUnavailable {
		buf, err := ioutil.ReadAll(req.Body)
		if err != nil {
			return nil, fmt.Errorf("error reading from request data: %v", err)
		}
		t.buf = append(t.buf, buf...)
	}

	tc := &trackingCloser{unexpectedReader{}, t.bodies}
	tc.Open()
	h := http.Header{}
	status := ev.responseStatus

	// Support "X-GUploader-No-308" like Google:
	if status == 308 && req.Header.Get("X-GUploader-No-308") == "yes" {
		status = 200
		h.Set("X-Http-Status-Code-Override", "308")
	}

	res := &http.Response{
		StatusCode: status,
		Header:     h,
		Body:       tc,
	}
	return res, nil
}
|
||||
|
||||
// progressRecorder collects progress callbacks and optionally runs a hook f
// on each one, letting tests observe (or interrupt) an upload in flight.
type progressRecorder struct {
	updates []int64
	f       func()
}

// ProgressUpdate appends current to the recorded updates and invokes f when set.
func (pr *progressRecorder) ProgressUpdate(current int64) {
	pr.updates = append(pr.updates, current)
	if fn := pr.f; fn != nil {
		fn()
	}
}
|
||||
|
||||
// TestInterruptedTransferChunks runs resumable uploads against a scripted
// transport that injects 503s, and verifies that chunks are retried, all
// bytes arrive, progress callbacks fire, and every response body is closed.
func TestInterruptedTransferChunks(t *testing.T) {
	type testCase struct {
		data         string
		chunkSize    int
		events       []event
		wantProgress []int64
	}

	for _, tc := range []testCase{
		{
			data:      strings.Repeat("a", 300),
			chunkSize: 90,
			events: []event{
				{"bytes 0-89/*", http.StatusServiceUnavailable},
				{"bytes 0-89/*", 308},
				{"bytes 90-179/*", 308},
				{"bytes 180-269/*", http.StatusServiceUnavailable},
				{"bytes 180-269/*", 308},
				{"bytes 270-299/300", 200},
			},

			wantProgress: []int64{90, 180, 270, 300},
		},
		{
			data:      strings.Repeat("a", 20),
			chunkSize: 10,
			events: []event{
				{"bytes 0-9/*", http.StatusServiceUnavailable},
				{"bytes 0-9/*", 308},
				{"bytes 10-19/*", http.StatusServiceUnavailable},
				{"bytes 10-19/*", 308},
				// 0 byte final request demands a byte range with leading asterisk.
				{"bytes */20", http.StatusServiceUnavailable},
				{"bytes */20", 200},
			},

			wantProgress: []int64{10, 20},
		},
	} {
		media := strings.NewReader(tc.data)

		tr := &interruptibleTransport{
			buf:    make([]byte, 0, len(tc.data)),
			events: tc.events,
			bodies: bodyTracker{},
		}

		pr := progressRecorder{}
		rx := &ResumableUpload{
			Client:    &http.Client{Transport: tr},
			Media:     NewMediaBuffer(media, tc.chunkSize),
			MediaType: "text/plain",
			Callback:  pr.ProgressUpdate,
			Backoff:   NoPauseStrategy,
		}
		res, err := rx.Upload(context.Background())
		if err == nil {
			res.Body.Close()
		}
		if err != nil || res == nil || res.StatusCode != http.StatusOK {
			if res == nil {
				t.Errorf("Upload not successful, res=nil: %v", err)
			} else {
				t.Errorf("Upload not successful, statusCode=%v: %v", res.StatusCode, err)
			}
		}
		if !reflect.DeepEqual(tr.buf, []byte(tc.data)) {
			t.Errorf("transferred contents:\ngot %s\nwant %s", tr.buf, tc.data)
		}

		if !reflect.DeepEqual(pr.updates, tc.wantProgress) {
			t.Errorf("progress updates: got %v, want %v", pr.updates, tc.wantProgress)
		}

		if len(tr.events) > 0 {
			t.Errorf("did not observe all expected events. leftover events: %v", tr.events)
		}
		if len(tr.bodies) > 0 {
			t.Errorf("unclosed request bodies: %v", tr.bodies)
		}
	}
}
|
||||
|
||||
// TestCancelUploadFast verifies that cancelling the context before the first
// chunk is sent makes Upload return context.Canceled immediately, with no
// response and no progress callbacks.
func TestCancelUploadFast(t *testing.T) {
	const (
		chunkSize = 90
		mediaSize = 300
	)
	media := strings.NewReader(strings.Repeat("a", mediaSize))

	tr := &interruptibleTransport{
		buf: make([]byte, 0, mediaSize),
	}

	pr := progressRecorder{}
	rx := &ResumableUpload{
		Client:    &http.Client{Transport: tr},
		Media:     NewMediaBuffer(media, chunkSize),
		MediaType: "text/plain",
		Callback:  pr.ProgressUpdate,
		Backoff:   NoPauseStrategy,
	}
	ctx, cancelFunc := context.WithCancel(context.Background())
	cancelFunc() // stop the upload that hasn't started yet
	res, err := rx.Upload(ctx)
	if err != context.Canceled {
		t.Errorf("Upload err: got: %v; want: context cancelled", err)
	}
	if res != nil {
		t.Errorf("Upload result: got: %v; want: nil", res)
	}
	if pr.updates != nil {
		t.Errorf("progress updates: got %v; want: nil", pr.updates)
	}
}
|
||||
|
||||
// TestCancelUpload cancels the context from the progress callback after the
// second chunk and verifies that Upload stops with context.Canceled, having
// transferred exactly two chunks and closed every response body.
func TestCancelUpload(t *testing.T) {
	const (
		chunkSize = 90
		mediaSize = 300
	)
	media := strings.NewReader(strings.Repeat("a", mediaSize))

	tr := &interruptibleTransport{
		buf: make([]byte, 0, mediaSize),
		events: []event{
			{"bytes 0-89/*", http.StatusServiceUnavailable},
			{"bytes 0-89/*", 308},
			{"bytes 90-179/*", 308},
			{"bytes 180-269/*", 308}, // Upload should be cancelled before this event.
		},
		bodies: bodyTracker{},
	}

	ctx, cancelFunc := context.WithCancel(context.Background())
	numUpdates := 0

	// Cancel from inside the progress callback once two chunks have landed.
	pr := progressRecorder{f: func() {
		numUpdates++
		if numUpdates >= 2 {
			cancelFunc()
		}
	}}

	rx := &ResumableUpload{
		Client:    &http.Client{Transport: tr},
		Media:     NewMediaBuffer(media, chunkSize),
		MediaType: "text/plain",
		Callback:  pr.ProgressUpdate,
		Backoff:   NoPauseStrategy,
	}
	res, err := rx.Upload(ctx)
	if err != context.Canceled {
		t.Errorf("Upload err: got: %v; want: context cancelled", err)
	}
	if res != nil {
		t.Errorf("Upload result: got: %v; want: nil", res)
	}
	if got, want := tr.buf, []byte(strings.Repeat("a", chunkSize*2)); !reflect.DeepEqual(got, want) {
		t.Errorf("transferred contents:\ngot %s\nwant %s", got, want)
	}
	if got, want := pr.updates, []int64{chunkSize, chunkSize * 2}; !reflect.DeepEqual(got, want) {
		t.Errorf("progress updates: got %v; want: %v", got, want)
	}
	if len(tr.bodies) > 0 {
		t.Errorf("unclosed request bodies: %v", tr.bodies)
	}
}
|
85
vendor/google.golang.org/api/gensupport/retry.go
generated
vendored
Normal file
85
vendor/google.golang.org/api/gensupport/retry.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Retry invokes the given function, retrying it multiple times if the connection failed or
// the HTTP status response indicates the request should be attempted again. ctx may be nil.
func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) {
	for {
		resp, err := f()

		var status int
		if resp != nil {
			status = resp.StatusCode
		}

		// Return if we shouldn't retry.
		// NOTE: Pause is consulted on every attempt, so the backoff budget is
		// consumed even when the response turns out to be final.
		pause, retry := backoff.Pause()
		if !shouldRetry(status, err) || !retry {
			return resp, err
		}

		// Ensure the response body is closed, if any.
		if resp != nil && resp.Body != nil {
			resp.Body.Close()
		}

		// Pause, but still listen to ctx.Done if context is not nil.
		// A nil ctx leaves done as a nil channel, which never fires, so
		// only the time.After case can be selected.
		var done <-chan struct{}
		if ctx != nil {
			done = ctx.Done()
		}
		select {
		case <-done:
			return nil, ctx.Err()
		case <-time.After(pause):
		}
	}
}
|
||||
|
||||
// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests.
|
||||
func DefaultBackoffStrategy() BackoffStrategy {
|
||||
return &ExponentialBackoff{
|
||||
Base: 250 * time.Millisecond,
|
||||
Max: 16 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// shouldRetry returns true if the HTTP response / error indicates that the
|
||||
// request should be attempted again.
|
||||
func shouldRetry(status int, err error) bool {
|
||||
if 500 <= status && status <= 599 {
|
||||
return true
|
||||
}
|
||||
if status == statusTooManyRequests {
|
||||
return true
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return true
|
||||
}
|
||||
if err, ok := err.(net.Error); ok {
|
||||
return err.Temporary()
|
||||
}
|
||||
return false
|
||||
}
|
176
vendor/google.golang.org/api/gensupport/retry_test.go
generated
vendored
Normal file
176
vendor/google.golang.org/api/gensupport/retry_test.go
generated
vendored
Normal file
@@ -0,0 +1,176 @@
|
||||
// Copyright 2017 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// TestRetry drives Retry with canned status-code sequences and a bounded
// backoff, checking the final response status and that f is called exactly
// as many times as statuses were scripted.
func TestRetry(t *testing.T) {
	testCases := []struct {
		desc       string
		respStatus []int // HTTP status codes returned (length indicates number of calls we expect).
		maxRetry   int   // Max number of calls allowed by the BackoffStrategy.
		wantStatus int   // StatusCode of returned response.
	}{
		{
			desc:       "First call successful",
			respStatus: []int{200},
			maxRetry:   3,
			wantStatus: 200,
		},
		{
			desc:       "Retry before success",
			respStatus: []int{500, 500, 500, 200},
			maxRetry:   3,
			wantStatus: 200,
		},
		{
			desc:       "Backoff strategy abandons after 3 retries",
			respStatus: []int{500, 500, 500, 500},
			maxRetry:   3,
			wantStatus: 500,
		},
		{
			desc:       "Backoff strategy abandons after 2 retries",
			respStatus: []int{500, 500, 500},
			maxRetry:   2,
			wantStatus: 500,
		},
	}
	for _, tt := range testCases {
		// Function consumes tt.respStatus
		f := func() (*http.Response, error) {
			if len(tt.respStatus) == 0 {
				return nil, errors.New("too many requests to function")
			}
			resp := &http.Response{StatusCode: tt.respStatus[0]}
			tt.respStatus = tt.respStatus[1:]
			return resp, nil
		}

		backoff := &LimitRetryStrategy{
			Max:      tt.maxRetry,
			Strategy: NoPauseStrategy,
		}

		resp, err := Retry(nil, f, backoff)
		if err != nil {
			t.Errorf("%s: Retry returned err %v", tt.desc, err)
		}
		if got := resp.StatusCode; got != tt.wantStatus {
			t.Errorf("%s: Retry returned response with StatusCode=%d; want %d", tt.desc, got, tt.wantStatus)
		}
		if len(tt.respStatus) != 0 {
			t.Errorf("%s: f was not called enough; status codes remaining: %v", tt.desc, tt.respStatus)
		}
	}
}
|
||||
|
||||
// checkCloseReader is an empty ReadCloser that remembers whether Close was
// called, letting tests assert on response-body hygiene.
type checkCloseReader struct {
	closed bool
}

// Read always reports end-of-input without touching p.
func (c *checkCloseReader) Read(p []byte) (n int, err error) { return 0, io.EOF }

// Close records that it was called and never fails.
func (c *checkCloseReader) Close() error {
	c.closed = true
	return nil
}
|
||||
|
||||
// TestRetryClosesBody checks that Retry closes the body of every retried
// (non-final) response and leaves only the returned response's body open.
func TestRetryClosesBody(t *testing.T) {
	var i int
	responses := []*http.Response{
		{StatusCode: 500, Body: &checkCloseReader{}},
		{StatusCode: 500, Body: &checkCloseReader{}},
		{StatusCode: 200, Body: &checkCloseReader{}},
	}
	f := func() (*http.Response, error) {
		resp := responses[i]
		i++
		return resp, nil
	}

	resp, err := Retry(nil, f, NoPauseStrategy)
	if err != nil {
		t.Fatalf("Retry returned error: %v", err)
	}
	if resp != responses[2] {
		t.Errorf("Retry returned %v; want %v", resp, responses[2])
	}
	for i, resp := range responses {
		want := i != 2 // Only the last response should not be closed.
		got := resp.Body.(*checkCloseReader).closed
		if got != want {
			t.Errorf("response[%d].Body closed = %t, want %t", i, got, want)
		}
	}
}
||||
|
||||
func RetryReturnsOnContextCancel(t *testing.T) {
|
||||
f := func() (*http.Response, error) {
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
backoff := UniformPauseStrategy(time.Hour)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
errc := make(chan error, 1)
|
||||
go func() {
|
||||
_, err := Retry(ctx, f, backoff)
|
||||
errc <- err
|
||||
}()
|
||||
|
||||
cancel()
|
||||
select {
|
||||
case err := <-errc:
|
||||
if err != ctx.Err() {
|
||||
t.Errorf("Retry returned err: %v, want %v", err, ctx.Err())
|
||||
}
|
||||
case <-time.After(5 * time.Second):
|
||||
t.Errorf("Timed out waiting for Retry to return")
|
||||
}
|
||||
}
|
||||
|
||||
func TestShouldRetry(t *testing.T) {
|
||||
testCases := []struct {
|
||||
status int
|
||||
err error
|
||||
want bool
|
||||
}{
|
||||
{status: 200, want: false},
|
||||
{status: 308, want: false},
|
||||
{status: 403, want: false},
|
||||
{status: 429, want: true},
|
||||
{status: 500, want: true},
|
||||
{status: 503, want: true},
|
||||
{status: 600, want: false},
|
||||
{err: io.EOF, want: false},
|
||||
{err: errors.New("random badness"), want: false},
|
||||
{err: io.ErrUnexpectedEOF, want: true},
|
||||
{err: &net.AddrError{}, want: false}, // Not temporary.
|
||||
{err: &net.DNSError{IsTimeout: true}, want: true}, // Temporary.
|
||||
}
|
||||
for _, tt := range testCases {
|
||||
if got := shouldRetry(tt.status, tt.err); got != tt.want {
|
||||
t.Errorf("shouldRetry(%d, %v) = %t; want %t", tt.status, tt.err, got, tt.want)
|
||||
}
|
||||
}
|
||||
}
|
71
vendor/google.golang.org/api/gensupport/send.go
generated
vendored
Normal file
71
vendor/google.golang.org/api/gensupport/send.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/context/ctxhttp"
|
||||
)
|
||||
|
||||
// Hook is the type of a function that is called once before each HTTP request
// that is sent by a generated API. It returns a function that is called after
// the request returns.
// Hooks are not called if the context is nil.
type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response)

// hooks is the list of registered hooks, in registration order. It is not
// mutex-protected; safety relies on the documented convention that
// RegisterHook is never called concurrently with itself or SendRequest.
var hooks []Hook

// RegisterHook registers a Hook to be called before each HTTP request by a
// generated API. Hooks are called in the order they are registered. Each
// hook can return a function; if it is non-nil, it is called after the HTTP
// request returns. These functions are called in the reverse order.
// RegisterHook should not be called concurrently with itself or SendRequest.
func RegisterHook(h Hook) {
	hooks = append(hooks, h)
}
|
||||
|
||||
// SendRequest sends a single HTTP request using the given client.
|
||||
// If ctx is non-nil, it calls all hooks, then sends the request with
|
||||
// ctxhttp.Do, then calls any functions returned by the hooks in reverse order.
|
||||
func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
|
||||
// Disallow Accept-Encoding because it interferes with the automatic gzip handling
|
||||
// done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
|
||||
if _, ok := req.Header["Accept-Encoding"]; ok {
|
||||
return nil, errors.New("google api: custom Accept-Encoding headers not allowed")
|
||||
}
|
||||
if ctx == nil {
|
||||
return client.Do(req)
|
||||
}
|
||||
// Call hooks in order of registration, store returned funcs.
|
||||
post := make([]func(resp *http.Response), len(hooks))
|
||||
for i, h := range hooks {
|
||||
fn := h(ctx, req)
|
||||
post[i] = fn
|
||||
}
|
||||
|
||||
// Send request.
|
||||
resp, err := ctxhttp.Do(ctx, client, req)
|
||||
|
||||
// Call returned funcs in reverse order.
|
||||
for i := len(post) - 1; i >= 0; i-- {
|
||||
if fn := post[i]; fn != nil {
|
||||
fn(resp)
|
||||
}
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// DecodeResponse decodes the body of res into target. If there is no body,
|
||||
// target is unchanged.
|
||||
func DecodeResponse(target interface{}, res *http.Response) error {
|
||||
if res.StatusCode == http.StatusNoContent {
|
||||
return nil
|
||||
}
|
||||
return json.NewDecoder(res.Body).Decode(target)
|
||||
}
|
20
vendor/google.golang.org/api/gensupport/send_test.go
generated
vendored
Normal file
20
vendor/google.golang.org/api/gensupport/send_test.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestSendRequest(t *testing.T) {
|
||||
// Setting Accept-Encoding should give an error immediately.
|
||||
req, _ := http.NewRequest("GET", "url", nil)
|
||||
req.Header.Set("Accept-Encoding", "")
|
||||
_, err := SendRequest(nil, nil, req)
|
||||
if err == nil {
|
||||
t.Error("got nil, want error")
|
||||
}
|
||||
}
|
57
vendor/google.golang.org/api/gensupport/util_test.go
generated
vendored
Normal file
57
vendor/google.golang.org/api/gensupport/util_test.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
// errReader reads out of a buffer until it is empty, then returns the specified error.
|
||||
type errReader struct {
|
||||
buf []byte
|
||||
err error
|
||||
}
|
||||
|
||||
func (er *errReader) Read(p []byte) (int, error) {
|
||||
if len(er.buf) == 0 {
|
||||
if er.err == nil {
|
||||
return 0, io.EOF
|
||||
}
|
||||
return 0, er.err
|
||||
}
|
||||
n := copy(p, er.buf)
|
||||
er.buf = er.buf[n:]
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// UniformPauseStrategy implements BackoffStrategy with uniform pause.
|
||||
type UniformPauseStrategy time.Duration
|
||||
|
||||
func (p UniformPauseStrategy) Pause() (time.Duration, bool) { return time.Duration(p), true }
|
||||
func (p UniformPauseStrategy) Reset() {}
|
||||
|
||||
// NoPauseStrategy implements BackoffStrategy with infinite 0-length pauses.
|
||||
const NoPauseStrategy = UniformPauseStrategy(0)
|
||||
|
||||
// LimitRetryStrategy wraps a BackoffStrategy but limits the number of retries.
|
||||
type LimitRetryStrategy struct {
|
||||
Max int
|
||||
Strategy BackoffStrategy
|
||||
n int
|
||||
}
|
||||
|
||||
func (l *LimitRetryStrategy) Pause() (time.Duration, bool) {
|
||||
l.n++
|
||||
if l.n > l.Max {
|
||||
return 0, false
|
||||
}
|
||||
return l.Strategy.Pause()
|
||||
}
|
||||
|
||||
func (l *LimitRetryStrategy) Reset() {
|
||||
l.n = 0
|
||||
l.Strategy.Reset()
|
||||
}
|
Reference in New Issue
Block a user